author    Linus Torvalds <torvalds@linux-foundation.org>  2019-09-19 16:24:24 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>  2019-09-19 16:24:24 -0700
commit    574cc4539762561d96b456dbc0544d8898bd4c6e (patch)
tree      07d84db8cf9fd30cbde6f539ce3a3f6116593e41 /drivers/gpu/drm/i915
parent    3c2edc36a77420d8be05d656019dbc8c31535992 (diff)
parent    945b584c94f8c665b2df3834a8a6a8faf256cd5f (diff)
Merge tag 'drm-next-2019-09-18' of git://anongit.freedesktop.org/drm/drm
Pull drm updates from Dave Airlie:
 "This is the main pull request for 5.4-rc1 merge window. I don't think
  there is anything outstanding so next week should just be fixes, but
  we'll see if I missed anything.

  I landed some fixes earlier in the week but got delayed writing
  summary and sending it out, due to a mix of sick kid and jetlag!

  There are some fixes pending, but I'd rather get the main merge out
  of the way instead of delaying it longer. It's also pretty large in
  commit count and new amd header file size. The largest thing is four
  new amdgpu products (navi12/14, arcturus and renoir APU support).

  Otherwise it's pretty much lots of work across the board: i915 has
  started landing tigerlake support, lots of icelake fixes and lots of
  locking reworking for future gpu support, lots of header file rework
  (drmP.h is nearly gone), some old legacy hacks (DRM_WAIT_ON) have
  been put into the places they are needed.

  uapi:
   - content protection type property for HDCP

  core:
   - rework include dependencies
   - lots of drmP.h removals
   - link rate calculation robustness fix
   - make fb helper map only when required
   - add connector->DDC adapter link
   - DRM_WAIT_ON removed
   - drop DRM_AUTH usage from drivers

  dma-buf:
   - reservation object fence helper

  dma-fence:
   - shrink dma_fence struct
   - merge signal functions
   - store timestamps in dma_fence
   - selftests

  ttm:
   - embed drm_gem_object struct into ttm_buffer_object
   - release_notify callback

  bridges:
   - sii902x - audio graph card support
   - tc358767 - aux data handling rework
   - ti-sn65dsi86 - debugfs support, DSI mode flags support

  panels:
   - Support for GiantPlus GPM940B0, Sharp LQ070Y3DG3B, Ortustech
     COM37H3M, Novatek NT39016, Sharp LS020B1DD01D, Raydium RM67191,
     Boe Himax8279d, Sharp LD-D5116Z01B
   - TI nspire, NEC NL8048HL11, LG Philips LB035Q02, Sharp LS037V7DW01,
     Sony ACX565AKM, Toppoly TD028TTEC1, Toppoly TD043MTEA1

  i915:
   - Initial tigerlake platform support
   - Locking simplification work, general all over refactoring
   - Selftests
   - HDCP debug info improvements
   - DSI properties
   - Icelake display PLL fixes, colorspace fixes, bandwidth fixes, DSI
     suspend/resume
   - GuC fixes
   - Perf fixes
   - ElkhartLake enablement
   - DP MST fixes
   - GVT - command parser enhancements

  amdgpu:
   - add wipe memory on release flag for buffer creation
   - Navi12/14 support (may be marked experimental)
   - Arcturus support
   - Renoir APU support
   - mclk DPM for Navi
   - DC display fixes
   - Raven scatter/gather support
   - RAS support for GFX
   - Navi12 + Arcturus power features
   - GPU reset for Picasso
   - smu11 i2c controller support

  amdkfd:
   - navi12/14 support
   - Arcturus support

  radeon:
   - kexec fix

  nouveau:
   - improved display color management
   - detect lack of GPU power cables

  vmwgfx:
   - eviction priority support
   - remove unused security feature

  msm:
   - msm8998 display support
   - better async commit support for cursor updates

  etnaviv:
   - per-process address space support
   - performance counter fixes
   - softpin support

  mcde:
   - DCS transfers fix

  exynos:
   - drmP.h cleanup

  lima:
   - reduce logging

  kirin:
   - misc cleanups

  komeda:
   - dual-link support
   - DT memory regions

  hisilicon:
   - misc fixes

  imx:
   - IPUv3 image converter fixes
   - 32-bit RGB V4L2 pixel format support

  ingenic:
   - more support for panel related cases

  mgag200:
   - cursor support fix

  panfrost:
   - export GPU features register to userspace
   - gpu heap allocations
   - per-fd address space support

  pl111:
   - CLD pads wiring support removed from DT

  rockchip:
   - rework to use DRM PSR helpers
   - fix bug in VOP_WIN_GET macro
   - DSI DT binding rework

  sun4i:
   - improve support for color encoding and range
   - DDC enabled GPIO

  tinydrm:
   - rework SPI support
   - improve MIPI-DBI support
   - moved to drm/tiny

  vkms:
   - rework CRC tracking

  dw-hdmi:
   - get_eld and i2s improvements

  gm12u320:
   - misc fixes

  meson:
   - global code cleanup
   - vpu feature detect

  omap:
   - alpha/pixel blend mode properties

  rcar-du:
   - misc fixes"

* tag 'drm-next-2019-09-18' of git://anongit.freedesktop.org/drm/drm: (2112 commits)
  drm/nouveau/bar/gm20b: Avoid BAR1 teardown during init
  drm/nouveau: Fix ordering between TTM and GEM release
  drm/nouveau/prime: Extend DMA reservation object lock
  drm/nouveau: Fix fallout from reservation object rework
  drm/nouveau/kms/nv50-: Don't create MSTMs for eDP connectors
  drm/i915: Use NOEVICT for first pass on attemping to pin a GGTT mmap
  drm/i915: to make vgpu ppgtt notificaiton as atomic operation
  drm/i915: Flush the existing fence before GGTT read/write
  drm/i915: Hold irq-off for the entire fake lock period
  drm/i915/gvt: update RING_START reg of vGPU when the context is submitted to i915
  drm/i915/gvt: update vgpu workload head pointer correctly
  drm/mcde: Fix DSI transfers
  drm/msm: Use the correct dma_sync calls harder
  drm/msm: remove unlikely() from WARN_ON() conditions
  drm/msm/dsi: Fix return value check for clk_get_parent
  drm/msm: add atomic traces
  drm/msm/dpu: async commit support
  drm/msm: async commit support
  drm/msm: split power control from prepare/complete_commit
  drm/msm: add kms->flush_commit()
  ...
Diffstat (limited to 'drivers/gpu/drm/i915')
-rw-r--r--  drivers/gpu/drm/i915/Kconfig.debug | 16
-rw-r--r--  drivers/gpu/drm/i915/Makefile | 93
-rw-r--r--  drivers/gpu/drm/i915/Makefile.header-test | 22
-rw-r--r--  drivers/gpu/drm/i915/display/Makefile | 6
-rw-r--r--  drivers/gpu/drm/i915/display/Makefile.header-test | 16
-rw-r--r--  drivers/gpu/drm/i915/display/dvo_ch7017.c | 2
-rw-r--r--  drivers/gpu/drm/i915/display/dvo_ch7xxx.c | 2
-rw-r--r--  drivers/gpu/drm/i915/display/dvo_ivch.c | 2
-rw-r--r--  drivers/gpu/drm/i915/display/dvo_ns2501.c | 2
-rw-r--r--  drivers/gpu/drm/i915/display/dvo_sil164.c | 2
-rw-r--r--  drivers/gpu/drm/i915/display/dvo_tfp410.c | 2
-rw-r--r--  drivers/gpu/drm/i915/display/icl_dsi.c | 244
-rw-r--r--  drivers/gpu/drm/i915/display/intel_atomic.c | 2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_atomic_plane.c | 59
-rw-r--r--  drivers/gpu/drm/i915/display/intel_atomic_plane.h | 5
-rw-r--r--  drivers/gpu/drm/i915/display/intel_audio.c | 83
-rw-r--r--  drivers/gpu/drm/i915/display/intel_bios.c | 25
-rw-r--r--  drivers/gpu/drm/i915/display/intel_bios.h | 3
-rw-r--r--  drivers/gpu/drm/i915/display/intel_bw.c | 18
-rw-r--r--  drivers/gpu/drm/i915/display/intel_bw.h | 15
-rw-r--r--  drivers/gpu/drm/i915/display/intel_cdclk.c | 106
-rw-r--r--  drivers/gpu/drm/i915/display/intel_color.c | 2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_combo_phy.c | 195
-rw-r--r--  drivers/gpu/drm/i915/display/intel_combo_phy.h | 4
-rw-r--r--  drivers/gpu/drm/i915/display/intel_connector.c | 4
-rw-r--r--  drivers/gpu/drm/i915/display/intel_crt.c | 17
-rw-r--r--  drivers/gpu/drm/i915/display/intel_ddi.c | 465
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display.c | 1365
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display.h | 239
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_power.c | 779
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_power.h | 73
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_types.h (renamed from drivers/gpu/drm/i915/intel_drv.h) | 189
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp.c | 394
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp.h | 2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c | 7
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp_link_training.c | 2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp_mst.c | 17
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp_mst.h | 1
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dpio_phy.c | 8
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dpll_mgr.c | 698
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dpll_mgr.h | 57
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dsi.h | 15
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c | 2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dsi_vbt.c | 2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dvo.c | 2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_fbc.c | 7
-rw-r--r--  drivers/gpu/drm/i915/display/intel_fbdev.c | 51
-rw-r--r--  drivers/gpu/drm/i915/display/intel_fifo_underrun.c | 3
-rw-r--r--  drivers/gpu/drm/i915/display/intel_frontbuffer.c | 257
-rw-r--r--  drivers/gpu/drm/i915/display/intel_frontbuffer.h | 70
-rw-r--r--  drivers/gpu/drm/i915/display/intel_gmbus.c | 19
-rw-r--r--  drivers/gpu/drm/i915/display/intel_gmbus.h | 22
-rw-r--r--  drivers/gpu/drm/i915/display/intel_hdcp.c | 101
-rw-r--r--  drivers/gpu/drm/i915/display/intel_hdcp.h | 2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_hdmi.c | 118
-rw-r--r--  drivers/gpu/drm/i915/display/intel_hotplug.c | 67
-rw-r--r--  drivers/gpu/drm/i915/display/intel_hotplug.h | 5
-rw-r--r--  drivers/gpu/drm/i915/display/intel_lspcon.c | 2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_lvds.c | 8
-rw-r--r--  drivers/gpu/drm/i915/display/intel_opregion.c | 2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_overlay.c | 149
-rw-r--r--  drivers/gpu/drm/i915/display/intel_panel.c | 2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_pipe_crc.c | 4
-rw-r--r--  drivers/gpu/drm/i915/display/intel_psr.c | 8
-rw-r--r--  drivers/gpu/drm/i915/display/intel_quirks.c | 2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_sdvo.c | 316
-rw-r--r--  drivers/gpu/drm/i915/display/intel_sprite.c | 344
-rw-r--r--  drivers/gpu/drm/i915/display/intel_sprite.h | 8
-rw-r--r--  drivers/gpu/drm/i915/display/intel_tc.c | 544
-rw-r--r--  drivers/gpu/drm/i915/display/intel_tc.h | 30
-rw-r--r--  drivers/gpu/drm/i915/display/intel_tv.c | 2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_vbt_defs.h | 6
-rw-r--r--  drivers/gpu/drm/i915/display/intel_vdsc.c | 16
-rw-r--r--  drivers/gpu/drm/i915/display/vlv_dsi.c | 88
-rw-r--r--  drivers/gpu/drm/i915/display/vlv_dsi_pll.c | 16
-rw-r--r--  drivers/gpu/drm/i915/gem/Makefile | 6
-rw-r--r--  drivers/gpu/drm/i915/gem/Makefile.header-test | 16
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_busy.c | 4
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_clflush.c | 127
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_client_blt.c | 60
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_context.c | 231
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_context.h | 8
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_context_types.h | 9
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c | 7
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_domain.c | 49
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c | 343
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_fence.c | 5
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_mman.c | 32
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_object.c | 159
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_object.h | 24
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_object_blt.c | 376
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_object_blt.h | 25
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_object_types.h | 10
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_pages.c | 13
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_phys.c | 13
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_pm.c | 51
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_shmem.c | 8
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_shrinker.c | 101
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_shrinker.h | 31
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_stolen.c | 11
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_stolen.h | 35
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_throttle.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_userptr.c | 14
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_wait.c | 24
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gemfs.c | 31
-rw-r--r--  drivers/gpu/drm/i915/gem/selftests/huge_pages.c | 187
-rw-r--r--  drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c | 42
-rw-r--r--  drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c | 13
-rw-r--r--  drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c | 274
-rw-r--r--  drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c | 8
-rw-r--r--  drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c | 66
-rw-r--r--  drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c | 141
-rw-r--r--  drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c | 141
-rw-r--r--  drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.h | 16
-rw-r--r--  drivers/gpu/drm/i915/gt/Makefile | 5
-rw-r--r--  drivers/gpu/drm/i915/gt/Makefile.header-test | 16
-rw-r--r--  drivers/gpu/drm/i915/gt/gen6_renderstate.c (renamed from drivers/gpu/drm/i915/intel_renderstate_gen6.c) | 0
-rw-r--r--  drivers/gpu/drm/i915/gt/gen7_renderstate.c (renamed from drivers/gpu/drm/i915/intel_renderstate_gen7.c) | 0
-rw-r--r--  drivers/gpu/drm/i915/gt/gen8_renderstate.c (renamed from drivers/gpu/drm/i915/intel_renderstate_gen8.c) | 0
-rw-r--r--  drivers/gpu/drm/i915/gt/gen9_renderstate.c (renamed from drivers/gpu/drm/i915/intel_renderstate_gen9.c) | 0
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_breadcrumbs.c | 49
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_context.c | 180
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_context.h | 35
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_context_types.h | 15
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_engine.h | 90
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_engine_cs.c | 430
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_engine_pm.c | 87
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_engine_pm.h | 20
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_engine_pool.c | 177
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_engine_pool.h | 34
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_engine_pool_types.h | 29
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_engine_types.h | 129
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_engine_user.c | 303
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_engine_user.h | 25
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gpu_commands.h | 18
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt.c | 268
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt.h | 60
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt_irq.c | 455
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt_irq.h | 44
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt_pm.c | 84
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt_pm.h | 41
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt_pm_irq.c | 109
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt_pm_irq.h | 22
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt_types.h | 102
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_hangcheck.c | 71
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_lrc.c | 1377
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_lrc_reg.h | 1
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_mocs.c | 218
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_mocs.h | 7
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_renderstate.c (renamed from drivers/gpu/drm/i915/i915_gem_render_state.c) | 17
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_renderstate.h (renamed from drivers/gpu/drm/i915/intel_renderstate.h) | 10
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_reset.c | 633
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_reset.h | 75
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_reset_types.h | 50
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_ringbuffer.c | 339
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_sseu.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_timeline.c (renamed from drivers/gpu/drm/i915/i915_timeline.c) | 304
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_timeline.h | 94
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_timeline_types.h (renamed from drivers/gpu/drm/i915/i915_timeline_types.h) | 28
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_workarounds.c | 253
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_workarounds.h | 6
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_workarounds_types.h | 1
-rw-r--r--  drivers/gpu/drm/i915/gt/mock_engine.c | 104
-rw-r--r--  drivers/gpu/drm/i915/gt/selftest_context.c | 456
-rw-r--r--  drivers/gpu/drm/i915/gt/selftest_engine.c | 28
-rw-r--r--  drivers/gpu/drm/i915/gt/selftest_engine.h | 14
-rw-r--r--  drivers/gpu/drm/i915/gt/selftest_engine_cs.c | 26
-rw-r--r--  drivers/gpu/drm/i915/gt/selftest_engine_pm.c | 83
-rw-r--r--  drivers/gpu/drm/i915/gt/selftest_hangcheck.c | 528
-rw-r--r--  drivers/gpu/drm/i915/gt/selftest_lrc.c | 522
-rw-r--r--  drivers/gpu/drm/i915/gt/selftest_reset.c | 133
-rw-r--r--  drivers/gpu/drm/i915/gt/selftest_timeline.c (renamed from drivers/gpu/drm/i915/selftests/i915_timeline.c) | 135
-rw-r--r--  drivers/gpu/drm/i915/gt/selftest_workarounds.c | 186
-rw-r--r--  drivers/gpu/drm/i915/gt/selftests/mock_timeline.c (renamed from drivers/gpu/drm/i915/selftests/mock_timeline.c) | 10
-rw-r--r--  drivers/gpu/drm/i915/gt/selftests/mock_timeline.h (renamed from drivers/gpu/drm/i915/selftests/mock_timeline.h) | 6
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/Makefile | 5
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_guc.c (renamed from drivers/gpu/drm/i915/intel_guc.c) | 320
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_guc.h (renamed from drivers/gpu/drm/i915/intel_guc.h) | 76
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c (renamed from drivers/gpu/drm/i915/intel_guc_ads.c) | 52
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_guc_ads.h | 15
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c (renamed from drivers/gpu/drm/i915/intel_guc_ct.c) | 44
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_guc_ct.h (renamed from drivers/gpu/drm/i915/intel_guc_ct.h) | 33
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c | 166
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_guc_fw.h | 14
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h (renamed from drivers/gpu/drm/i915/intel_guc_fwif.h) | 104
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_guc_log.c (renamed from drivers/gpu/drm/i915/intel_guc_log.c) | 78
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_guc_log.h (renamed from drivers/gpu/drm/i915/intel_guc_log.h) | 24
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_guc_reg.h (renamed from drivers/gpu/drm/i915/intel_guc_reg.h) | 62
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c (renamed from drivers/gpu/drm/i915/intel_guc_submission.c) | 590
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h (renamed from drivers/gpu/drm/i915/intel_guc_submission.h) | 28
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_huc.c (renamed from drivers/gpu/drm/i915/intel_huc.c) | 112
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_huc.h | 54
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c | 58
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_huc_fw.h (renamed from drivers/gpu/drm/i915/intel_huc_fw.h) | 5
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_uc.c | 627
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_uc.h | 67
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c | 616
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h | 241
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_uc_fw_abi.h | 82
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/selftest_guc.c (renamed from drivers/gpu/drm/i915/selftests/intel_guc.c) | 70
-rw-r--r--  drivers/gpu/drm/i915/gvt/aperture_gm.c | 10
-rw-r--r--  drivers/gpu/drm/i915/gvt/cmd_parser.c | 180
-rw-r--r--  drivers/gpu/drm/i915/gvt/debugfs.c | 47
-rw-r--r--  drivers/gpu/drm/i915/gvt/dmabuf.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gvt/gtt.h | 13
-rw-r--r--  drivers/gpu/drm/i915/gvt/gvt.c | 4
-rw-r--r--  drivers/gpu/drm/i915/gvt/gvt.h | 8
-rw-r--r--  drivers/gpu/drm/i915/gvt/interrupt.c | 4
-rw-r--r--  drivers/gpu/drm/i915/gvt/kvmgt.c | 15
-rw-r--r--  drivers/gpu/drm/i915/gvt/mmio_context.c | 57
-rw-r--r--  drivers/gpu/drm/i915/gvt/scheduler.c | 83
-rw-r--r--  drivers/gpu/drm/i915/gvt/vgpu.c | 4
-rw-r--r--  drivers/gpu/drm/i915/i915_active.c | 640
-rw-r--r--  drivers/gpu/drm/i915/i915_active.h | 61
-rw-r--r--  drivers/gpu/drm/i915/i915_active_types.h | 30
-rw-r--r--  drivers/gpu/drm/i915/i915_buddy.c | 428
-rw-r--r--  drivers/gpu/drm/i915/i915_buddy.h | 128
-rw-r--r--  drivers/gpu/drm/i915/i915_cmd_parser.c | 4
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c | 493
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c | 919
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 766
-rw-r--r--  drivers/gpu/drm/i915/i915_fixed.h | 5
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 586
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.h | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_batch_pool.c | 140
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_batch_pool.h | 26
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_evict.c | 9
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_fence_reg.c | 140
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_fence_reg.h | 5
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.c | 2178
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.h | 206
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_render_state.h | 31
-rw-r--r--  drivers/gpu/drm/i915/i915_getparam.c | 168
-rw-r--r--  drivers/gpu/drm/i915/i915_globals.c | 1
-rw-r--r--  drivers/gpu/drm/i915/i915_globals.h | 3
-rw-r--r--  drivers/gpu/drm/i915/i915_gpu_error.c | 824
-rw-r--r--  drivers/gpu/drm/i915/i915_gpu_error.h | 78
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c | 1598
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.h | 110
-rw-r--r--  drivers/gpu/drm/i915/i915_memcpy.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_memcpy.h | 32
-rw-r--r--  drivers/gpu/drm/i915/i915_mm.c | 5
-rw-r--r--  drivers/gpu/drm/i915/i915_oa_bdw.h | 15
-rw-r--r--  drivers/gpu/drm/i915/i915_oa_bxt.h | 15
-rw-r--r--  drivers/gpu/drm/i915/i915_oa_cflgt2.h | 15
-rw-r--r--  drivers/gpu/drm/i915/i915_oa_cflgt3.h | 15
-rw-r--r--  drivers/gpu/drm/i915/i915_oa_chv.h | 15
-rw-r--r--  drivers/gpu/drm/i915/i915_oa_cnl.h | 15
-rw-r--r--  drivers/gpu/drm/i915/i915_oa_glk.h | 15
-rw-r--r--  drivers/gpu/drm/i915/i915_oa_hsw.h | 15
-rw-r--r--  drivers/gpu/drm/i915/i915_oa_icl.h | 15
-rw-r--r--  drivers/gpu/drm/i915/i915_oa_kblgt2.h | 15
-rw-r--r--  drivers/gpu/drm/i915/i915_oa_kblgt3.h | 15
-rw-r--r--  drivers/gpu/drm/i915/i915_oa_sklgt2.h | 15
-rw-r--r--  drivers/gpu/drm/i915/i915_oa_sklgt3.h | 15
-rw-r--r--  drivers/gpu/drm/i915/i915_oa_sklgt4.h | 15
-rw-r--r--  drivers/gpu/drm/i915/i915_params.c | 5
-rw-r--r--  drivers/gpu/drm/i915/i915_params.h | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_pci.c | 65
-rw-r--r--  drivers/gpu/drm/i915/i915_perf.c | 836
-rw-r--r--  drivers/gpu/drm/i915/i915_perf.h | 32
-rw-r--r--  drivers/gpu/drm/i915/i915_pmu.c | 298
-rw-r--r--  drivers/gpu/drm/i915/i915_priolist_types.h | 15
-rw-r--r--  drivers/gpu/drm/i915/i915_pvinfo.h | 7
-rw-r--r--  drivers/gpu/drm/i915/i915_query.c | 5
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h | 356
-rw-r--r--  drivers/gpu/drm/i915/i915_request.c | 381
-rw-r--r--  drivers/gpu/drm/i915/i915_request.h | 29
-rw-r--r--  drivers/gpu/drm/i915/i915_scheduler.c | 7
-rw-r--r--  drivers/gpu/drm/i915/i915_scheduler_types.h | 1
-rw-r--r--  drivers/gpu/drm/i915/i915_selftest.h | 29
-rw-r--r--  drivers/gpu/drm/i915/i915_suspend.c | 3
-rw-r--r--  drivers/gpu/drm/i915/i915_suspend.h | 14
-rw-r--r--  drivers/gpu/drm/i915/i915_sw_fence.c | 31
-rw-r--r--  drivers/gpu/drm/i915/i915_sw_fence.h | 11
-rw-r--r--  drivers/gpu/drm/i915/i915_sw_fence_work.c | 95
-rw-r--r--  drivers/gpu/drm/i915/i915_sw_fence_work.h | 44
-rw-r--r--  drivers/gpu/drm/i915/i915_sysfs.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_sysfs.h | 14
-rw-r--r--  drivers/gpu/drm/i915/i915_timeline.h | 94
-rw-r--r--  drivers/gpu/drm/i915/i915_trace.h | 24
-rw-r--r--  drivers/gpu/drm/i915/i915_utils.c | 78
-rw-r--r--  drivers/gpu/drm/i915/i915_utils.h | 51
-rw-r--r--  drivers/gpu/drm/i915/i915_vgpu.c | 65
-rw-r--r--  drivers/gpu/drm/i915/i915_vgpu.h | 7
-rw-r--r--  drivers/gpu/drm/i915/i915_vma.c | 145
-rw-r--r--  drivers/gpu/drm/i915/i915_vma.h | 29
-rw-r--r--  drivers/gpu/drm/i915/intel_csr.c | 7
-rw-r--r--  drivers/gpu/drm/i915/intel_device_info.c | 45
-rw-r--r--  drivers/gpu/drm/i915/intel_device_info.h | 6
-rw-r--r--  drivers/gpu/drm/i915/intel_guc_ads.h | 34
-rw-r--r--  drivers/gpu/drm/i915/intel_guc_fw.c | 308
-rw-r--r--  drivers/gpu/drm/i915/intel_guc_fw.h | 33
-rw-r--r--  drivers/gpu/drm/i915/intel_gvt.c | 7
-rw-r--r--  drivers/gpu/drm/i915/intel_gvt.h | 7
-rw-r--r--  drivers/gpu/drm/i915/intel_huc.h | 65
-rw-r--r--  drivers/gpu/drm/i915/intel_huc_fw.c | 215
-rw-r--r--  drivers/gpu/drm/i915/intel_pch.c | 201
-rw-r--r--  drivers/gpu/drm/i915/intel_pch.h | 73
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.c | 460
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.h | 4
-rw-r--r--  drivers/gpu/drm/i915/intel_runtime_pm.c | 3
-rw-r--r--  drivers/gpu/drm/i915/intel_runtime_pm.h | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_sideband.c | 4
-rw-r--r--  drivers/gpu/drm/i915/intel_uc.c | 561
-rw-r--r--  drivers/gpu/drm/i915/intel_uc.h | 64
-rw-r--r--  drivers/gpu/drm/i915/intel_uc_fw.c | 357
-rw-r--r--  drivers/gpu/drm/i915/intel_uc_fw.h | 155
-rw-r--r--  drivers/gpu/drm/i915/intel_uncore.c | 558
-rw-r--r--  drivers/gpu/drm/i915/intel_uncore.h | 54
-rw-r--r--  drivers/gpu/drm/i915/intel_wakeref.c | 89
-rw-r--r--  drivers/gpu/drm/i915/intel_wakeref.h | 84
-rw-r--r--  drivers/gpu/drm/i915/intel_wopcm.c | 268
-rw-r--r--  drivers/gpu/drm/i915/intel_wopcm.h | 18
-rw-r--r--  drivers/gpu/drm/i915/oa/Makefile | 7
-rw-r--r--  drivers/gpu/drm/i915/oa/i915_oa_bdw.c (renamed from drivers/gpu/drm/i915/i915_oa_bdw.c) | 35
-rw-r--r--  drivers/gpu/drm/i915/oa/i915_oa_bdw.h | 16
-rw-r--r--  drivers/gpu/drm/i915/oa/i915_oa_bxt.c (renamed from drivers/gpu/drm/i915/i915_oa_bxt.c) | 35
-rw-r--r--  drivers/gpu/drm/i915/oa/i915_oa_bxt.h | 16
-rw-r--r--  drivers/gpu/drm/i915/oa/i915_oa_cflgt2.c (renamed from drivers/gpu/drm/i915/i915_oa_cflgt2.c) | 35
-rw-r--r--  drivers/gpu/drm/i915/oa/i915_oa_cflgt2.h | 16
-rw-r--r--  drivers/gpu/drm/i915/oa/i915_oa_cflgt3.c (renamed from drivers/gpu/drm/i915/i915_oa_cflgt3.c) | 35
-rw-r--r--  drivers/gpu/drm/i915/oa/i915_oa_cflgt3.h | 16
-rw-r--r--  drivers/gpu/drm/i915/oa/i915_oa_chv.c (renamed from drivers/gpu/drm/i915/i915_oa_chv.c) | 35
-rw-r--r--  drivers/gpu/drm/i915/oa/i915_oa_chv.h | 16
-rw-r--r--  drivers/gpu/drm/i915/oa/i915_oa_cnl.c (renamed from drivers/gpu/drm/i915/i915_oa_cnl.c) | 35
-rw-r--r--  drivers/gpu/drm/i915/oa/i915_oa_cnl.h | 16
-rw-r--r--  drivers/gpu/drm/i915/oa/i915_oa_glk.c (renamed from drivers/gpu/drm/i915/i915_oa_glk.c) | 35
-rw-r--r--  drivers/gpu/drm/i915/oa/i915_oa_glk.h | 16
-rw-r--r--  drivers/gpu/drm/i915/oa/i915_oa_hsw.c (renamed from drivers/gpu/drm/i915/i915_oa_hsw.c) | 35
-rw-r--r--  drivers/gpu/drm/i915/oa/i915_oa_hsw.h | 16
-rw-r--r--  drivers/gpu/drm/i915/oa/i915_oa_icl.c (renamed from drivers/gpu/drm/i915/i915_oa_icl.c) | 35
-rw-r--r--  drivers/gpu/drm/i915/oa/i915_oa_icl.h | 16
-rw-r--r--  drivers/gpu/drm/i915/oa/i915_oa_kblgt2.c (renamed from drivers/gpu/drm/i915/i915_oa_kblgt2.c) | 35
-rw-r--r--  drivers/gpu/drm/i915/oa/i915_oa_kblgt2.h | 16
-rw-r--r--  drivers/gpu/drm/i915/oa/i915_oa_kblgt3.c (renamed from drivers/gpu/drm/i915/i915_oa_kblgt3.c) | 35
-rw-r--r--  drivers/gpu/drm/i915/oa/i915_oa_kblgt3.h | 16
-rw-r--r--  drivers/gpu/drm/i915/oa/i915_oa_sklgt2.c (renamed from drivers/gpu/drm/i915/i915_oa_sklgt2.c) | 35
-rw-r--r--  drivers/gpu/drm/i915/oa/i915_oa_sklgt2.h | 16
-rw-r--r--  drivers/gpu/drm/i915/oa/i915_oa_sklgt3.c (renamed from drivers/gpu/drm/i915/i915_oa_sklgt3.c) | 35
-rw-r--r--  drivers/gpu/drm/i915/oa/i915_oa_sklgt3.h | 16
-rw-r--r--  drivers/gpu/drm/i915/oa/i915_oa_sklgt4.c (renamed from drivers/gpu/drm/i915/i915_oa_sklgt4.c) | 35
-rw-r--r--  drivers/gpu/drm/i915/oa/i915_oa_sklgt4.h | 16
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_active.c | 127
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_buddy.c | 720
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_gem.c | 11
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_gem_evict.c | 22
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_gem_gtt.c | 4
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_live_selftests.h | 6
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_mock_selftests.h | 3
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_request.c | 89
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_selftest.c | 67
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_vma.c | 10
-rw-r--r--  drivers/gpu/drm/i915/selftests/igt_flush_test.c | 5
-rw-r--r--  drivers/gpu/drm/i915/selftests/igt_reset.c | 38
-rw-r--r--  drivers/gpu/drm/i915/selftests/igt_reset.h | 10
-rw-r--r--  drivers/gpu/drm/i915/selftests/igt_spinner.c | 34
-rw-r--r--  drivers/gpu/drm/i915/selftests/igt_spinner.h | 9
-rw-r--r--  drivers/gpu/drm/i915/selftests/igt_wedge_me.h | 58
-rw-r--r--  drivers/gpu/drm/i915/selftests/lib_sw_fence.c | 1
-rw-r--r--  drivers/gpu/drm/i915/selftests/mock_gem_device.c | 19
-rw-r--r--  drivers/gpu/drm/i915/selftests/mock_gtt.c | 3
-rw-r--r--  drivers/gpu/drm/i915/selftests/mock_request.c | 6
-rw-r--r--  drivers/gpu/drm/i915/selftests/mock_request.h | 4
-rw-r--r--  drivers/gpu/drm/i915/selftests/mock_uncore.c | 4
365 files changed, 23856 insertions(+), 17160 deletions(-)
diff --git a/drivers/gpu/drm/i915/Kconfig.debug b/drivers/gpu/drm/i915/Kconfig.debug
index 8d922bb4d953..00786a142ff0 100644
--- a/drivers/gpu/drm/i915/Kconfig.debug
+++ b/drivers/gpu/drm/i915/Kconfig.debug
@@ -7,6 +7,7 @@ config DRM_I915_WERROR
# We use the dependency on !COMPILE_TEST to not be enabled in
# allmodconfig or allyesconfig configurations
depends on !COMPILE_TEST
+ select HEADER_TEST
default n
help
Add -Werror to the build flags for (and only for) i915.ko.
@@ -29,6 +30,7 @@ config DRM_I915_DEBUG
select DRM_VGEM # used by igt/prime_vgem (dmabuf interop checks)
select DRM_DEBUG_MM if DRM=y
select DRM_DEBUG_SELFTEST
+ select DMABUF_SELFTESTS
select SW_SYNC # signaling validation framework (igt/syncobj*)
select DRM_I915_SW_FENCE_DEBUG_OBJECTS
select DRM_I915_SELFTEST
@@ -94,6 +96,20 @@ config DRM_I915_TRACE_GEM
If in doubt, say "N".
+config DRM_I915_TRACE_GTT
+ bool "Insert extra ftrace output from the GTT internals"
+ depends on DRM_I915_DEBUG_GEM
+ select TRACING
+ default n
+ help
+ Enable additional and verbose debugging output that will spam
+ ordinary tests, but may be vital for post-mortem debugging when
+ used with /proc/sys/kernel/ftrace_dump_on_oops
+
+ Recommended for driver developers only.
+
+ If in doubt, say "N".
+
config DRM_I915_SW_FENCE_DEBUG_OBJECTS
bool "Enable additional driver debugging for fence objects"
depends on DRM_I915
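The new DRM_I915_TRACE_GTT option above only adds a build-time switch; the hunk does not show how such a switch is consumed. As a rough, hypothetical illustration (the macro name and wiring below are not from this patch), a config-gated trace helper in kernel C typically looks like:

#if IS_ENABLED(CONFIG_DRM_I915_TRACE_GTT)
#define GTT_TRACE(fmt, ...) trace_printk(fmt, ##__VA_ARGS__)	/* buffered in the ftrace ring */
#else
#define GTT_TRACE(fmt, ...) do { } while (0)			/* compiles away entirely */
#endif

Combined with /proc/sys/kernel/ftrace_dump_on_oops, anything logged through such a helper before a crash is dumped to the console, which is the post-mortem use case the help text recommends.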
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 8cace65f50ce..658b930d34a8 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -32,22 +32,25 @@ subdir-ccflags-y += \
$(call as-instr,movntdqa (%eax)$(comma)%xmm0,-DCONFIG_AS_MOVNTDQA)
# Extra header tests
-include $(src)/Makefile.header-test
+header-test-pattern-$(CONFIG_DRM_I915_WERROR) := *.h
-subdir-ccflags-y += -I$(src)
+subdir-ccflags-y += -I$(srctree)/$(src)
# Please keep these build lists sorted!
# core driver code
i915-y += i915_drv.o \
i915_irq.o \
+ i915_getparam.o \
i915_params.o \
i915_pci.o \
i915_scatterlist.o \
i915_suspend.o \
i915_sysfs.o \
+ i915_utils.o \
intel_csr.o \
intel_device_info.o \
+ intel_pch.o \
intel_pm.o \
intel_runtime_pm.o \
intel_sideband.o \
@@ -59,6 +62,7 @@ i915-y += \
i915_memcpy.o \
i915_mm.o \
i915_sw_fence.o \
+ i915_sw_fence_work.o \
i915_syncmap.o \
i915_user_extensions.o
@@ -72,17 +76,28 @@ gt-y += \
gt/intel_breadcrumbs.o \
gt/intel_context.o \
gt/intel_engine_cs.o \
+ gt/intel_engine_pool.o \
gt/intel_engine_pm.o \
+ gt/intel_engine_user.o \
+ gt/intel_gt.o \
+ gt/intel_gt_irq.o \
gt/intel_gt_pm.o \
+ gt/intel_gt_pm_irq.o \
gt/intel_hangcheck.o \
gt/intel_lrc.o \
+ gt/intel_renderstate.o \
gt/intel_reset.o \
gt/intel_ringbuffer.o \
gt/intel_mocs.o \
gt/intel_sseu.o \
+ gt/intel_timeline.o \
gt/intel_workarounds.o
-gt-$(CONFIG_DRM_I915_SELFTEST) += \
- gt/mock_engine.o
+# autogenerated null render state
+gt-y += \
+ gt/gen6_renderstate.o \
+ gt/gen7_renderstate.o \
+ gt/gen8_renderstate.o \
+ gt/gen9_renderstate.o
i915-y += $(gt-y)
# GEM (Graphics Execution Management) code
@@ -114,39 +129,32 @@ gem-y += \
i915-y += \
$(gem-y) \
i915_active.o \
+ i915_buddy.o \
i915_cmd_parser.o \
- i915_gem_batch_pool.o \
i915_gem_evict.o \
i915_gem_fence_reg.o \
i915_gem_gtt.o \
i915_gem.o \
- i915_gem_render_state.o \
i915_globals.o \
i915_query.o \
i915_request.o \
i915_scheduler.o \
- i915_timeline.o \
i915_trace_points.o \
i915_vma.o \
intel_wopcm.o
# general-purpose microcontroller (GuC) support
-i915-y += intel_uc.o \
- intel_uc_fw.o \
- intel_guc.o \
- intel_guc_ads.o \
- intel_guc_ct.o \
- intel_guc_fw.o \
- intel_guc_log.o \
- intel_guc_submission.o \
- intel_huc.o \
- intel_huc_fw.o
-
-# autogenerated null render state
-i915-y += intel_renderstate_gen6.o \
- intel_renderstate_gen7.o \
- intel_renderstate_gen8.o \
- intel_renderstate_gen9.o
+obj-y += gt/uc/
+i915-y += gt/uc/intel_uc.o \
+ gt/uc/intel_uc_fw.o \
+ gt/uc/intel_guc.o \
+ gt/uc/intel_guc_ads.o \
+ gt/uc/intel_guc_ct.o \
+ gt/uc/intel_guc_fw.o \
+ gt/uc/intel_guc_log.o \
+ gt/uc/intel_guc_submission.o \
+ gt/uc/intel_huc.o \
+ gt/uc/intel_huc_fw.o
# modesetting core code
obj-y += display/
@@ -173,7 +181,8 @@ i915-y += \
display/intel_overlay.o \
display/intel_psr.o \
display/intel_quirks.o \
- display/intel_sprite.o
+ display/intel_sprite.o \
+ display/intel_tc.o
i915-$(CONFIG_ACPI) += \
display/intel_acpi.o \
display/intel_opregion.o
@@ -210,6 +219,25 @@ i915-y += \
display/vlv_dsi.o \
display/vlv_dsi_pll.o
+# perf code
+obj-y += oa/
+i915-y += \
+ oa/i915_oa_hsw.o \
+ oa/i915_oa_bdw.o \
+ oa/i915_oa_chv.o \
+ oa/i915_oa_sklgt2.o \
+ oa/i915_oa_sklgt3.o \
+ oa/i915_oa_sklgt4.o \
+ oa/i915_oa_bxt.o \
+ oa/i915_oa_kblgt2.o \
+ oa/i915_oa_kblgt3.o \
+ oa/i915_oa_glk.o \
+ oa/i915_oa_cflgt2.o \
+ oa/i915_oa_cflgt3.o \
+ oa/i915_oa_cnl.o \
+ oa/i915_oa_icl.o
+i915-y += i915_perf.o
+
# Post-mortem debug and GPU hang state capture
i915-$(CONFIG_DRM_I915_CAPTURE_ERROR) += i915_gpu_error.o
i915-$(CONFIG_DRM_I915_SELFTEST) += \
@@ -224,23 +252,6 @@ i915-$(CONFIG_DRM_I915_SELFTEST) += \
# virtual gpu code
i915-y += i915_vgpu.o
-# perf code
-i915-y += i915_perf.o \
- i915_oa_hsw.o \
- i915_oa_bdw.o \
- i915_oa_chv.o \
- i915_oa_sklgt2.o \
- i915_oa_sklgt3.o \
- i915_oa_sklgt4.o \
- i915_oa_bxt.o \
- i915_oa_kblgt2.o \
- i915_oa_kblgt3.o \
- i915_oa_glk.o \
- i915_oa_cflgt2.o \
- i915_oa_cflgt3.o \
- i915_oa_cnl.o \
- i915_oa_icl.o
-
ifeq ($(CONFIG_DRM_I915_GVT),y)
i915-y += intel_gvt.o
include $(src)/gvt/Makefile
diff --git a/drivers/gpu/drm/i915/Makefile.header-test b/drivers/gpu/drm/i915/Makefile.header-test
deleted file mode 100644
index 7cde0ec34615..000000000000
--- a/drivers/gpu/drm/i915/Makefile.header-test
+++ /dev/null
@@ -1,22 +0,0 @@
-# SPDX-License-Identifier: MIT
-# Copyright © 2019 Intel Corporation
-
-# Test the headers are compilable as standalone units
-header-test-$(CONFIG_DRM_I915_WERROR) := \
- i915_active_types.h \
- i915_debugfs.h \
- i915_drv.h \
- i915_irq.h \
- i915_params.h \
- i915_priolist_types.h \
- i915_reg.h \
- i915_scheduler_types.h \
- i915_timeline_types.h \
- i915_utils.h \
- intel_csr.h \
- intel_drv.h \
- intel_pm.h \
- intel_runtime_pm.h \
- intel_sideband.h \
- intel_uncore.h \
- intel_wakeref.h
diff --git a/drivers/gpu/drm/i915/display/Makefile b/drivers/gpu/drm/i915/display/Makefile
index 1c75b5c9790c..173c305d7866 100644
--- a/drivers/gpu/drm/i915/display/Makefile
+++ b/drivers/gpu/drm/i915/display/Makefile
@@ -1,2 +1,6 @@
+# For building individual subdir files on the command line
+subdir-ccflags-y += -I$(srctree)/$(src)/..
+
# Extra header tests
-include $(src)/Makefile.header-test
+header-test-pattern-$(CONFIG_DRM_I915_WERROR) := *.h
+header-test- := intel_vbt_defs.h
diff --git a/drivers/gpu/drm/i915/display/Makefile.header-test b/drivers/gpu/drm/i915/display/Makefile.header-test
deleted file mode 100644
index fc7d4e5bd2c6..000000000000
--- a/drivers/gpu/drm/i915/display/Makefile.header-test
+++ /dev/null
@@ -1,16 +0,0 @@
-# SPDX-License-Identifier: MIT
-# Copyright © 2019 Intel Corporation
-
-# Test the headers are compilable as standalone units
-header_test := $(notdir $(filter-out %/intel_vbt_defs.h,$(wildcard $(src)/*.h)))
-
-quiet_cmd_header_test = HDRTEST $@
- cmd_header_test = echo "\#include \"$(<F)\"" > $@
-
-header_test_%.c: %.h
- $(call cmd,header_test)
-
-extra-$(CONFIG_DRM_I915_WERROR) += \
- $(foreach h,$(header_test),$(patsubst %.h,header_test_%.o,$(h)))
-
-clean-files += $(foreach h,$(header_test),$(patsubst %.h,header_test_%.c,$(h)))
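For context on what was dropped: both deleted Makefile.header-test files synthesized a one-line C translation unit per header and compiled it, so every listed header had to build standalone. Per the cmd_header_test rule above, a generated unit looked like this (file and header name are illustrative):

/* header_test_intel_dp.c -- as generated by the old cmd_header_test rule */
#include "intel_dp.h"

The replacement lines, header-test-pattern-$(CONFIG_DRM_I915_WERROR) := *.h, hand the same check to the common kbuild header-test machinery, with header-test- := intel_vbt_defs.h opting that one header out just as the old filter-out did.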
diff --git a/drivers/gpu/drm/i915/display/dvo_ch7017.c b/drivers/gpu/drm/i915/display/dvo_ch7017.c
index 602380fe74f3..0589994dde11 100644
--- a/drivers/gpu/drm/i915/display/dvo_ch7017.c
+++ b/drivers/gpu/drm/i915/display/dvo_ch7017.c
@@ -25,7 +25,7 @@
*
*/
-#include "intel_drv.h"
+#include "intel_display_types.h"
#include "intel_dvo_dev.h"
#define CH7017_TV_DISPLAY_MODE 0x00
diff --git a/drivers/gpu/drm/i915/display/dvo_ch7xxx.c b/drivers/gpu/drm/i915/display/dvo_ch7xxx.c
index e070bebee7b5..54f58ba44b9f 100644
--- a/drivers/gpu/drm/i915/display/dvo_ch7xxx.c
+++ b/drivers/gpu/drm/i915/display/dvo_ch7xxx.c
@@ -26,7 +26,7 @@ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
**************************************************************************/
-#include "intel_drv.h"
+#include "intel_display_types.h"
#include "intel_dvo_dev.h"
#define CH7xxx_REG_VID 0x4a
diff --git a/drivers/gpu/drm/i915/display/dvo_ivch.c b/drivers/gpu/drm/i915/display/dvo_ivch.c
index 09dba35f3ffa..f43d8c610d3f 100644
--- a/drivers/gpu/drm/i915/display/dvo_ivch.c
+++ b/drivers/gpu/drm/i915/display/dvo_ivch.c
@@ -29,7 +29,7 @@
*
*/
-#include "intel_drv.h"
+#include "intel_display_types.h"
#include "intel_dvo_dev.h"
/*
diff --git a/drivers/gpu/drm/i915/display/dvo_ns2501.c b/drivers/gpu/drm/i915/display/dvo_ns2501.c
index c83a5d88d62b..a724a8755673 100644
--- a/drivers/gpu/drm/i915/display/dvo_ns2501.c
+++ b/drivers/gpu/drm/i915/display/dvo_ns2501.c
@@ -28,7 +28,7 @@
#include "i915_drv.h"
#include "i915_reg.h"
-#include "intel_drv.h"
+#include "intel_display_types.h"
#include "intel_dvo_dev.h"
#define NS2501_VID 0x1305
diff --git a/drivers/gpu/drm/i915/display/dvo_sil164.c b/drivers/gpu/drm/i915/display/dvo_sil164.c
index 04698eaeb632..0dfa0a0209ff 100644
--- a/drivers/gpu/drm/i915/display/dvo_sil164.c
+++ b/drivers/gpu/drm/i915/display/dvo_sil164.c
@@ -26,7 +26,7 @@ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
**************************************************************************/
-#include "intel_drv.h"
+#include "intel_display_types.h"
#include "intel_dvo_dev.h"
#define SIL164_VID 0x0001
diff --git a/drivers/gpu/drm/i915/display/dvo_tfp410.c b/drivers/gpu/drm/i915/display/dvo_tfp410.c
index 623114ee73cd..009d65b0f3e9 100644
--- a/drivers/gpu/drm/i915/display/dvo_tfp410.c
+++ b/drivers/gpu/drm/i915/display/dvo_tfp410.c
@@ -25,7 +25,7 @@
*
*/
-#include "intel_drv.h"
+#include "intel_display_types.h"
#include "intel_dvo_dev.h"
/* register definitions according to the TFP410 data sheet */
diff --git a/drivers/gpu/drm/i915/display/icl_dsi.c b/drivers/gpu/drm/i915/display/icl_dsi.c
index 74448e6bf749..6e398c33a524 100644
--- a/drivers/gpu/drm/i915/display/icl_dsi.c
+++ b/drivers/gpu/drm/i915/display/icl_dsi.c
@@ -202,63 +202,62 @@ static void dsi_program_swing_and_deemphasis(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
- enum port port;
+ enum phy phy;
u32 tmp;
int lane;
- for_each_dsi_port(port, intel_dsi->ports) {
-
+ for_each_dsi_phy(phy, intel_dsi->phys) {
/*
* Program voltage swing and pre-emphasis level values as per
* table in BSPEC under DDI buffer programing
*/
- tmp = I915_READ(ICL_PORT_TX_DW5_LN0(port));
+ tmp = I915_READ(ICL_PORT_TX_DW5_LN0(phy));
tmp &= ~(SCALING_MODE_SEL_MASK | RTERM_SELECT_MASK);
tmp |= SCALING_MODE_SEL(0x2);
tmp |= TAP2_DISABLE | TAP3_DISABLE;
tmp |= RTERM_SELECT(0x6);
- I915_WRITE(ICL_PORT_TX_DW5_GRP(port), tmp);
+ I915_WRITE(ICL_PORT_TX_DW5_GRP(phy), tmp);
- tmp = I915_READ(ICL_PORT_TX_DW5_AUX(port));
+ tmp = I915_READ(ICL_PORT_TX_DW5_AUX(phy));
tmp &= ~(SCALING_MODE_SEL_MASK | RTERM_SELECT_MASK);
tmp |= SCALING_MODE_SEL(0x2);
tmp |= TAP2_DISABLE | TAP3_DISABLE;
tmp |= RTERM_SELECT(0x6);
- I915_WRITE(ICL_PORT_TX_DW5_AUX(port), tmp);
+ I915_WRITE(ICL_PORT_TX_DW5_AUX(phy), tmp);
- tmp = I915_READ(ICL_PORT_TX_DW2_LN0(port));
+ tmp = I915_READ(ICL_PORT_TX_DW2_LN0(phy));
tmp &= ~(SWING_SEL_LOWER_MASK | SWING_SEL_UPPER_MASK |
RCOMP_SCALAR_MASK);
tmp |= SWING_SEL_UPPER(0x2);
tmp |= SWING_SEL_LOWER(0x2);
tmp |= RCOMP_SCALAR(0x98);
- I915_WRITE(ICL_PORT_TX_DW2_GRP(port), tmp);
+ I915_WRITE(ICL_PORT_TX_DW2_GRP(phy), tmp);
- tmp = I915_READ(ICL_PORT_TX_DW2_AUX(port));
+ tmp = I915_READ(ICL_PORT_TX_DW2_AUX(phy));
tmp &= ~(SWING_SEL_LOWER_MASK | SWING_SEL_UPPER_MASK |
RCOMP_SCALAR_MASK);
tmp |= SWING_SEL_UPPER(0x2);
tmp |= SWING_SEL_LOWER(0x2);
tmp |= RCOMP_SCALAR(0x98);
- I915_WRITE(ICL_PORT_TX_DW2_AUX(port), tmp);
+ I915_WRITE(ICL_PORT_TX_DW2_AUX(phy), tmp);
- tmp = I915_READ(ICL_PORT_TX_DW4_AUX(port));
+ tmp = I915_READ(ICL_PORT_TX_DW4_AUX(phy));
tmp &= ~(POST_CURSOR_1_MASK | POST_CURSOR_2_MASK |
CURSOR_COEFF_MASK);
tmp |= POST_CURSOR_1(0x0);
tmp |= POST_CURSOR_2(0x0);
tmp |= CURSOR_COEFF(0x3f);
- I915_WRITE(ICL_PORT_TX_DW4_AUX(port), tmp);
+ I915_WRITE(ICL_PORT_TX_DW4_AUX(phy), tmp);
for (lane = 0; lane <= 3; lane++) {
/* Bspec: must not use GRP register for write */
- tmp = I915_READ(ICL_PORT_TX_DW4_LN(lane, port));
+ tmp = I915_READ(ICL_PORT_TX_DW4_LN(lane, phy));
tmp &= ~(POST_CURSOR_1_MASK | POST_CURSOR_2_MASK |
CURSOR_COEFF_MASK);
tmp |= POST_CURSOR_1(0x0);
tmp |= POST_CURSOR_2(0x0);
tmp |= CURSOR_COEFF(0x3f);
- I915_WRITE(ICL_PORT_TX_DW4_LN(lane, port), tmp);
+ I915_WRITE(ICL_PORT_TX_DW4_LN(lane, phy), tmp);
}
}
}
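This hunk, and the ones that follow, re-key the ICL DSI register programming from DDI port to combo PHY. A minimal sketch of the assumed relationship (helper name hypothetical; the sketch assumes port and combo-PHY indices line up one-to-one on this hardware):

/* Sketch only: derive the combo PHY index from the DDI port. */
static inline enum phy port_to_phy_sketch(enum port port)
{
	return (enum phy)(port - PORT_A + PHY_A);
}

With intel_dsi->phys populated that way, for_each_dsi_phy() walks PHYs directly and the ICL_PORT_* register macros take a PHY index instead of a port.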
@@ -364,10 +363,10 @@ static void gen11_dsi_power_up_lanes(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
- enum port port;
+ enum phy phy;
- for_each_dsi_port(port, intel_dsi->ports)
- intel_combo_phy_power_up_lanes(dev_priv, port, true,
+ for_each_dsi_phy(phy, intel_dsi->phys)
+ intel_combo_phy_power_up_lanes(dev_priv, phy, true,
intel_dsi->lane_count, false);
}
@@ -375,34 +374,47 @@ static void gen11_dsi_config_phy_lanes_sequence(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
- enum port port;
+ enum phy phy;
u32 tmp;
int lane;
/* Step 4b(i) set loadgen select for transmit and aux lanes */
- for_each_dsi_port(port, intel_dsi->ports) {
- tmp = I915_READ(ICL_PORT_TX_DW4_AUX(port));
+ for_each_dsi_phy(phy, intel_dsi->phys) {
+ tmp = I915_READ(ICL_PORT_TX_DW4_AUX(phy));
tmp &= ~LOADGEN_SELECT;
- I915_WRITE(ICL_PORT_TX_DW4_AUX(port), tmp);
+ I915_WRITE(ICL_PORT_TX_DW4_AUX(phy), tmp);
for (lane = 0; lane <= 3; lane++) {
- tmp = I915_READ(ICL_PORT_TX_DW4_LN(lane, port));
+ tmp = I915_READ(ICL_PORT_TX_DW4_LN(lane, phy));
tmp &= ~LOADGEN_SELECT;
if (lane != 2)
tmp |= LOADGEN_SELECT;
- I915_WRITE(ICL_PORT_TX_DW4_LN(lane, port), tmp);
+ I915_WRITE(ICL_PORT_TX_DW4_LN(lane, phy), tmp);
}
}
/* Step 4b(ii) set latency optimization for transmit and aux lanes */
- for_each_dsi_port(port, intel_dsi->ports) {
- tmp = I915_READ(ICL_PORT_TX_DW2_AUX(port));
+ for_each_dsi_phy(phy, intel_dsi->phys) {
+ tmp = I915_READ(ICL_PORT_TX_DW2_AUX(phy));
tmp &= ~FRC_LATENCY_OPTIM_MASK;
tmp |= FRC_LATENCY_OPTIM_VAL(0x5);
- I915_WRITE(ICL_PORT_TX_DW2_AUX(port), tmp);
- tmp = I915_READ(ICL_PORT_TX_DW2_LN0(port));
+ I915_WRITE(ICL_PORT_TX_DW2_AUX(phy), tmp);
+ tmp = I915_READ(ICL_PORT_TX_DW2_LN0(phy));
tmp &= ~FRC_LATENCY_OPTIM_MASK;
tmp |= FRC_LATENCY_OPTIM_VAL(0x5);
- I915_WRITE(ICL_PORT_TX_DW2_GRP(port), tmp);
+ I915_WRITE(ICL_PORT_TX_DW2_GRP(phy), tmp);
+
+ /* For EHL, TGL, set latency optimization for PCS_DW1 lanes */
+ if (IS_ELKHARTLAKE(dev_priv) || (INTEL_GEN(dev_priv) >= 12)) {
+ tmp = I915_READ(ICL_PORT_PCS_DW1_AUX(phy));
+ tmp &= ~LATENCY_OPTIM_MASK;
+ tmp |= LATENCY_OPTIM_VAL(0);
+ I915_WRITE(ICL_PORT_PCS_DW1_AUX(phy), tmp);
+
+ tmp = I915_READ(ICL_PORT_PCS_DW1_LN0(phy));
+ tmp &= ~LATENCY_OPTIM_MASK;
+ tmp |= LATENCY_OPTIM_VAL(0x1);
+ I915_WRITE(ICL_PORT_PCS_DW1_GRP(phy), tmp);
+ }
}
}
@@ -412,16 +424,16 @@ static void gen11_dsi_voltage_swing_program_seq(struct intel_encoder *encoder)
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
u32 tmp;
- enum port port;
+ enum phy phy;
/* clear common keeper enable bit */
- for_each_dsi_port(port, intel_dsi->ports) {
- tmp = I915_READ(ICL_PORT_PCS_DW1_LN0(port));
+ for_each_dsi_phy(phy, intel_dsi->phys) {
+ tmp = I915_READ(ICL_PORT_PCS_DW1_LN0(phy));
tmp &= ~COMMON_KEEPER_EN;
- I915_WRITE(ICL_PORT_PCS_DW1_GRP(port), tmp);
- tmp = I915_READ(ICL_PORT_PCS_DW1_AUX(port));
+ I915_WRITE(ICL_PORT_PCS_DW1_GRP(phy), tmp);
+ tmp = I915_READ(ICL_PORT_PCS_DW1_AUX(phy));
tmp &= ~COMMON_KEEPER_EN;
- I915_WRITE(ICL_PORT_PCS_DW1_AUX(port), tmp);
+ I915_WRITE(ICL_PORT_PCS_DW1_AUX(phy), tmp);
}
/*
@@ -429,33 +441,33 @@ static void gen11_dsi_voltage_swing_program_seq(struct intel_encoder *encoder)
* Note: loadgen select program is done
* as part of lane phy sequence configuration
*/
- for_each_dsi_port(port, intel_dsi->ports) {
- tmp = I915_READ(ICL_PORT_CL_DW5(port));
+ for_each_dsi_phy(phy, intel_dsi->phys) {
+ tmp = I915_READ(ICL_PORT_CL_DW5(phy));
tmp |= SUS_CLOCK_CONFIG;
- I915_WRITE(ICL_PORT_CL_DW5(port), tmp);
+ I915_WRITE(ICL_PORT_CL_DW5(phy), tmp);
}
/* Clear training enable to change swing values */
- for_each_dsi_port(port, intel_dsi->ports) {
- tmp = I915_READ(ICL_PORT_TX_DW5_LN0(port));
+ for_each_dsi_phy(phy, intel_dsi->phys) {
+ tmp = I915_READ(ICL_PORT_TX_DW5_LN0(phy));
tmp &= ~TX_TRAINING_EN;
- I915_WRITE(ICL_PORT_TX_DW5_GRP(port), tmp);
- tmp = I915_READ(ICL_PORT_TX_DW5_AUX(port));
+ I915_WRITE(ICL_PORT_TX_DW5_GRP(phy), tmp);
+ tmp = I915_READ(ICL_PORT_TX_DW5_AUX(phy));
tmp &= ~TX_TRAINING_EN;
- I915_WRITE(ICL_PORT_TX_DW5_AUX(port), tmp);
+ I915_WRITE(ICL_PORT_TX_DW5_AUX(phy), tmp);
}
/* Program swing and de-emphasis */
dsi_program_swing_and_deemphasis(encoder);
/* Set training enable to trigger update */
- for_each_dsi_port(port, intel_dsi->ports) {
- tmp = I915_READ(ICL_PORT_TX_DW5_LN0(port));
+ for_each_dsi_phy(phy, intel_dsi->phys) {
+ tmp = I915_READ(ICL_PORT_TX_DW5_LN0(phy));
tmp |= TX_TRAINING_EN;
- I915_WRITE(ICL_PORT_TX_DW5_GRP(port), tmp);
- tmp = I915_READ(ICL_PORT_TX_DW5_AUX(port));
+ I915_WRITE(ICL_PORT_TX_DW5_GRP(phy), tmp);
+ tmp = I915_READ(ICL_PORT_TX_DW5_AUX(phy));
tmp |= TX_TRAINING_EN;
- I915_WRITE(ICL_PORT_TX_DW5_AUX(port), tmp);
+ I915_WRITE(ICL_PORT_TX_DW5_AUX(phy), tmp);
}
}
@@ -484,6 +496,7 @@ static void gen11_dsi_setup_dphy_timings(struct intel_encoder *encoder)
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
u32 tmp;
enum port port;
+ enum phy phy;
/* Program T-INIT master registers */
for_each_dsi_port(port, intel_dsi->ports) {
@@ -517,18 +530,28 @@ static void gen11_dsi_setup_dphy_timings(struct intel_encoder *encoder)
* a value '0' inside TA_PARAM_REGISTERS otherwise
* leave all fields at HW default values.
*/
- if (intel_dsi_bitrate(intel_dsi) <= 800000) {
- for_each_dsi_port(port, intel_dsi->ports) {
- tmp = I915_READ(DPHY_TA_TIMING_PARAM(port));
- tmp &= ~TA_SURE_MASK;
- tmp |= TA_SURE_OVERRIDE | TA_SURE(0);
- I915_WRITE(DPHY_TA_TIMING_PARAM(port), tmp);
-
- /* shadow register inside display core */
- tmp = I915_READ(DSI_TA_TIMING_PARAM(port));
- tmp &= ~TA_SURE_MASK;
- tmp |= TA_SURE_OVERRIDE | TA_SURE(0);
- I915_WRITE(DSI_TA_TIMING_PARAM(port), tmp);
+ if (IS_GEN(dev_priv, 11)) {
+ if (intel_dsi_bitrate(intel_dsi) <= 800000) {
+ for_each_dsi_port(port, intel_dsi->ports) {
+ tmp = I915_READ(DPHY_TA_TIMING_PARAM(port));
+ tmp &= ~TA_SURE_MASK;
+ tmp |= TA_SURE_OVERRIDE | TA_SURE(0);
+ I915_WRITE(DPHY_TA_TIMING_PARAM(port), tmp);
+
+ /* shadow register inside display core */
+ tmp = I915_READ(DSI_TA_TIMING_PARAM(port));
+ tmp &= ~TA_SURE_MASK;
+ tmp |= TA_SURE_OVERRIDE | TA_SURE(0);
+ I915_WRITE(DSI_TA_TIMING_PARAM(port), tmp);
+ }
+ }
+ }
+
+ if (IS_ELKHARTLAKE(dev_priv)) {
+ for_each_dsi_phy(phy, intel_dsi->phys) {
+ tmp = I915_READ(ICL_DPHY_CHKN(phy));
+ tmp |= ICL_DPHY_CHKN_AFE_OVER_PPI_STRAP;
+ I915_WRITE(ICL_DPHY_CHKN(phy), tmp);
}
}
}
@@ -538,15 +561,14 @@ static void gen11_dsi_gate_clocks(struct intel_encoder *encoder)
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
u32 tmp;
- enum port port;
+ enum phy phy;
mutex_lock(&dev_priv->dpll_lock);
- tmp = I915_READ(DPCLKA_CFGCR0_ICL);
- for_each_dsi_port(port, intel_dsi->ports) {
- tmp |= DPCLKA_CFGCR0_DDI_CLK_OFF(port);
- }
+ tmp = I915_READ(ICL_DPCLKA_CFGCR0);
+ for_each_dsi_phy(phy, intel_dsi->phys)
+ tmp |= ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy);
- I915_WRITE(DPCLKA_CFGCR0_ICL, tmp);
+ I915_WRITE(ICL_DPCLKA_CFGCR0, tmp);
mutex_unlock(&dev_priv->dpll_lock);
}
@@ -555,15 +577,14 @@ static void gen11_dsi_ungate_clocks(struct intel_encoder *encoder)
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
u32 tmp;
- enum port port;
+ enum phy phy;
mutex_lock(&dev_priv->dpll_lock);
- tmp = I915_READ(DPCLKA_CFGCR0_ICL);
- for_each_dsi_port(port, intel_dsi->ports) {
- tmp &= ~DPCLKA_CFGCR0_DDI_CLK_OFF(port);
- }
+ tmp = I915_READ(ICL_DPCLKA_CFGCR0);
+ for_each_dsi_phy(phy, intel_dsi->phys)
+ tmp &= ~ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy);
- I915_WRITE(DPCLKA_CFGCR0_ICL, tmp);
+ I915_WRITE(ICL_DPCLKA_CFGCR0, tmp);
mutex_unlock(&dev_priv->dpll_lock);
}
@@ -573,24 +594,27 @@ static void gen11_dsi_map_pll(struct intel_encoder *encoder,
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
struct intel_shared_dpll *pll = crtc_state->shared_dpll;
- enum port port;
+ enum phy phy;
u32 val;
mutex_lock(&dev_priv->dpll_lock);
- val = I915_READ(DPCLKA_CFGCR0_ICL);
- for_each_dsi_port(port, intel_dsi->ports) {
- val &= ~DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port);
- val |= DPCLKA_CFGCR0_DDI_CLK_SEL(pll->info->id, port);
+ val = I915_READ(ICL_DPCLKA_CFGCR0);
+ for_each_dsi_phy(phy, intel_dsi->phys) {
+ val &= ~ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy);
+ val |= ICL_DPCLKA_CFGCR0_DDI_CLK_SEL(pll->info->id, phy);
}
- I915_WRITE(DPCLKA_CFGCR0_ICL, val);
+ I915_WRITE(ICL_DPCLKA_CFGCR0, val);
- for_each_dsi_port(port, intel_dsi->ports) {
- val &= ~DPCLKA_CFGCR0_DDI_CLK_OFF(port);
+ for_each_dsi_phy(phy, intel_dsi->phys) {
+ if (INTEL_GEN(dev_priv) >= 12)
+ val |= ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy);
+ else
+ val &= ~ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy);
}
- I915_WRITE(DPCLKA_CFGCR0_ICL, val);
+ I915_WRITE(ICL_DPCLKA_CFGCR0, val);
- POSTING_READ(DPCLKA_CFGCR0_ICL);
+ POSTING_READ(ICL_DPCLKA_CFGCR0);
mutex_unlock(&dev_priv->dpll_lock);
}
@@ -661,6 +685,11 @@ gen11_dsi_configure_transcoder(struct intel_encoder *encoder,
break;
}
+ if (INTEL_GEN(dev_priv) >= 12) {
+ if (is_vid_mode(intel_dsi))
+ tmp |= BLANKING_PACKET_ENABLE;
+ }
+
/* program DSI operation mode */
if (is_vid_mode(intel_dsi)) {
tmp &= ~OP_MODE_MASK;
@@ -744,7 +773,7 @@ gen11_dsi_set_transcoder_timings(struct intel_encoder *encoder,
enum transcoder dsi_trans;
/* horizontal timings */
u16 htotal, hactive, hsync_start, hsync_end, hsync_size;
- u16 hfront_porch, hback_porch;
+ u16 hback_porch;
/* vertical timings */
u16 vtotal, vactive, vsync_start, vsync_end, vsync_shift;
@@ -753,8 +782,6 @@ gen11_dsi_set_transcoder_timings(struct intel_encoder *encoder,
hsync_start = adjusted_mode->crtc_hsync_start;
hsync_end = adjusted_mode->crtc_hsync_end;
hsync_size = hsync_end - hsync_start;
- hfront_porch = (adjusted_mode->crtc_hsync_start -
- adjusted_mode->crtc_hdisplay);
hback_porch = (adjusted_mode->crtc_htotal -
adjusted_mode->crtc_hsync_end);
vactive = adjusted_mode->crtc_vdisplay;
@@ -845,6 +872,15 @@ gen11_dsi_set_transcoder_timings(struct intel_encoder *encoder,
dsi_trans = dsi_port_to_transcoder(port);
I915_WRITE(VSYNCSHIFT(dsi_trans), vsync_shift);
}
+
+ /* program TRANS_VBLANK register, should be same as vtotal programmed */
+ if (INTEL_GEN(dev_priv) >= 12) {
+ for_each_dsi_port(port, intel_dsi->ports) {
+ dsi_trans = dsi_port_to_transcoder(port);
+ I915_WRITE(VBLANK(dsi_trans),
+ (vactive - 1) | ((vtotal - 1) << 16));
+ }
+ }
}
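The TRANS_VBLANK write above packs the vertical blank start (vactive - 1) into the low 16 bits and the blank end (vtotal - 1) into the high 16 bits. A worked example with illustrative numbers, not taken from the patch:

/* e.g. a 1080p panel timing: vactive = 1080, vtotal = 1125 */
u32 vblank = (1080 - 1) | ((1125 - 1) << 16);	/* == 0x04640437 */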
static void gen11_dsi_enable_transcoder(struct intel_encoder *encoder)
@@ -862,10 +898,8 @@ static void gen11_dsi_enable_transcoder(struct intel_encoder *encoder)
I915_WRITE(PIPECONF(dsi_trans), tmp);
/* wait for transcoder to be enabled */
- if (intel_wait_for_register(&dev_priv->uncore,
- PIPECONF(dsi_trans),
- I965_PIPECONF_ACTIVE,
- I965_PIPECONF_ACTIVE, 10))
+ if (intel_de_wait_for_set(dev_priv, PIPECONF(dsi_trans),
+ I965_PIPECONF_ACTIVE, 10))
DRM_ERROR("DSI transcoder not enabled\n");
}
}
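intel_de_wait_for_set() here (and intel_de_wait_for_clear() below) replace the open-coded intel_wait_for_register() calls. Presumably they are thin wrappers passing value == mask for "set" and value == 0 for "clear"; a sketch of that assumed equivalence, not the helpers' actual bodies:

static inline int de_wait_for_set_sketch(struct drm_i915_private *i915,
					 i915_reg_t reg, u32 mask,
					 unsigned int timeout_ms)
{
	/* Same arguments the removed call spelled out by hand. */
	return intel_wait_for_register(&i915->uncore, reg, mask, mask,
				       timeout_ms);
}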
@@ -923,6 +957,8 @@ static void
gen11_dsi_enable_port_and_phy(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config)
{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+
/* step 4a: power up all lanes of the DDI used by DSI */
gen11_dsi_power_up_lanes(encoder);
@@ -945,7 +981,8 @@ gen11_dsi_enable_port_and_phy(struct intel_encoder *encoder,
gen11_dsi_configure_transcoder(encoder, pipe_config);
/* Step 4l: Gate DDI clocks */
- gen11_dsi_gate_clocks(encoder);
+ if (IS_GEN(dev_priv, 11))
+ gen11_dsi_gate_clocks(encoder);
}
static void gen11_dsi_powerup_panel(struct intel_encoder *encoder)
@@ -1041,9 +1078,8 @@ static void gen11_dsi_disable_transcoder(struct intel_encoder *encoder)
I915_WRITE(PIPECONF(dsi_trans), tmp);
/* wait for transcoder to be disabled */
- if (intel_wait_for_register(&dev_priv->uncore,
- PIPECONF(dsi_trans),
- I965_PIPECONF_ACTIVE, 0, 50))
+ if (intel_de_wait_for_clear(dev_priv, PIPECONF(dsi_trans),
+ I965_PIPECONF_ACTIVE, 50))
DRM_ERROR("DSI trancoder not disabled\n");
}
}
@@ -1487,6 +1523,26 @@ static void icl_dphy_param_init(struct intel_dsi *intel_dsi)
intel_dsi_log_params(intel_dsi);
}
+static void icl_dsi_add_properties(struct intel_connector *connector)
+{
+ u32 allowed_scalers;
+
+ allowed_scalers = BIT(DRM_MODE_SCALE_ASPECT) |
+ BIT(DRM_MODE_SCALE_FULLSCREEN) |
+ BIT(DRM_MODE_SCALE_CENTER);
+
+ drm_connector_attach_scaling_mode_property(&connector->base,
+ allowed_scalers);
+
+ connector->base.state->scaling_mode = DRM_MODE_SCALE_ASPECT;
+
+ connector->base.display_info.panel_orientation =
+ intel_dsi_get_panel_orientation(connector);
+ drm_connector_init_panel_orientation_property(&connector->base,
+ connector->panel.fixed_mode->hdisplay,
+ connector->panel.fixed_mode->vdisplay);
+}
+
void icl_dsi_init(struct drm_i915_private *dev_priv)
{
struct drm_device *dev = &dev_priv->drm;
@@ -1580,6 +1636,8 @@ void icl_dsi_init(struct drm_i915_private *dev_priv)
}
icl_dphy_param_init(intel_dsi);
+
+ icl_dsi_add_properties(intel_connector);
return;
err:
diff --git a/drivers/gpu/drm/i915/display/intel_atomic.c b/drivers/gpu/drm/i915/display/intel_atomic.c
index 90ca11a4ae88..d3fb75bb9eb1 100644
--- a/drivers/gpu/drm/i915/display/intel_atomic.c
+++ b/drivers/gpu/drm/i915/display/intel_atomic.c
@@ -35,7 +35,7 @@
#include <drm/drm_plane_helper.h>
#include "intel_atomic.h"
-#include "intel_drv.h"
+#include "intel_display_types.h"
#include "intel_hdcp.h"
#include "intel_sprite.h"
diff --git a/drivers/gpu/drm/i915/display/intel_atomic_plane.c b/drivers/gpu/drm/i915/display/intel_atomic_plane.c
index 30bd4e76fff9..d1fcdf206da4 100644
--- a/drivers/gpu/drm/i915/display/intel_atomic_plane.c
+++ b/drivers/gpu/drm/i915/display/intel_atomic_plane.c
@@ -35,8 +35,9 @@
#include <drm/drm_fourcc.h>
#include <drm/drm_plane_helper.h>
+#include "i915_trace.h"
#include "intel_atomic_plane.h"
-#include "intel_drv.h"
+#include "intel_display_types.h"
#include "intel_pm.h"
#include "intel_sprite.h"
@@ -176,33 +177,49 @@ int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_
new_crtc_state->data_rate[plane->id] =
intel_plane_data_rate(new_crtc_state, new_plane_state);
- return intel_plane_atomic_calc_changes(old_crtc_state,
- &new_crtc_state->base,
- old_plane_state,
- &new_plane_state->base);
+ return intel_plane_atomic_calc_changes(old_crtc_state, new_crtc_state,
+ old_plane_state, new_plane_state);
}
-static int intel_plane_atomic_check(struct drm_plane *plane,
- struct drm_plane_state *new_plane_state)
+static struct intel_crtc *
+get_crtc_from_states(const struct intel_plane_state *old_plane_state,
+ const struct intel_plane_state *new_plane_state)
{
- struct drm_atomic_state *state = new_plane_state->state;
- const struct drm_plane_state *old_plane_state =
- drm_atomic_get_old_plane_state(state, plane);
- struct drm_crtc *crtc = new_plane_state->crtc ?: old_plane_state->crtc;
- const struct drm_crtc_state *old_crtc_state;
- struct drm_crtc_state *new_crtc_state;
-
- new_plane_state->visible = false;
+ if (new_plane_state->base.crtc)
+ return to_intel_crtc(new_plane_state->base.crtc);
+
+ if (old_plane_state->base.crtc)
+ return to_intel_crtc(old_plane_state->base.crtc);
+
+ return NULL;
+}
+
+static int intel_plane_atomic_check(struct drm_plane *_plane,
+ struct drm_plane_state *_new_plane_state)
+{
+ struct intel_plane *plane = to_intel_plane(_plane);
+ struct intel_atomic_state *state =
+ to_intel_atomic_state(_new_plane_state->state);
+ struct intel_plane_state *new_plane_state =
+ to_intel_plane_state(_new_plane_state);
+ const struct intel_plane_state *old_plane_state =
+ intel_atomic_get_old_plane_state(state, plane);
+ struct intel_crtc *crtc =
+ get_crtc_from_states(old_plane_state, new_plane_state);
+ const struct intel_crtc_state *old_crtc_state;
+ struct intel_crtc_state *new_crtc_state;
+
+ new_plane_state->base.visible = false;
if (!crtc)
return 0;
- old_crtc_state = drm_atomic_get_old_crtc_state(state, crtc);
- new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
+ old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc);
+ new_crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
- return intel_plane_atomic_check_with_state(to_intel_crtc_state(old_crtc_state),
- to_intel_crtc_state(new_crtc_state),
- to_intel_plane_state(old_plane_state),
- to_intel_plane_state(new_plane_state));
+ return intel_plane_atomic_check_with_state(old_crtc_state,
+ new_crtc_state,
+ old_plane_state,
+ new_plane_state);
}
static struct intel_plane *
diff --git a/drivers/gpu/drm/i915/display/intel_atomic_plane.h b/drivers/gpu/drm/i915/display/intel_atomic_plane.h
index 1437a8797e10..cb7ef4f9eafd 100644
--- a/drivers/gpu/drm/i915/display/intel_atomic_plane.h
+++ b/drivers/gpu/drm/i915/display/intel_atomic_plane.h
@@ -8,7 +8,6 @@
#include <linux/types.h>
-struct drm_crtc_state;
struct drm_plane;
struct drm_property;
struct intel_atomic_state;
@@ -43,8 +42,8 @@ int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_
const struct intel_plane_state *old_plane_state,
struct intel_plane_state *intel_state);
int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_state,
- struct drm_crtc_state *crtc_state,
+ struct intel_crtc_state *crtc_state,
const struct intel_plane_state *old_plane_state,
- struct drm_plane_state *plane_state);
+ struct intel_plane_state *plane_state);
#endif /* __INTEL_ATOMIC_PLANE_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_audio.c b/drivers/gpu/drm/i915/display/intel_audio.c
index 840daff12246..ddcccf4408c3 100644
--- a/drivers/gpu/drm/i915/display/intel_audio.c
+++ b/drivers/gpu/drm/i915/display/intel_audio.c
@@ -29,7 +29,7 @@
#include "i915_drv.h"
#include "intel_audio.h"
-#include "intel_drv.h"
+#include "intel_display_types.h"
#include "intel_lpe_audio.h"
/**
@@ -72,6 +72,13 @@ struct dp_aud_n_m {
u16 n;
};
+struct hdmi_aud_ncts {
+ int sample_rate;
+ int clock;
+ int n;
+ int cts;
+};
+
/* Values according to DP 1.4 Table 2-104 */
static const struct dp_aud_n_m dp_aud_n_m[] = {
{ 32000, LC_162M, 1024, 10125 },
@@ -148,12 +155,7 @@ static const struct {
#define TMDS_594M 594000
#define TMDS_593M 593407
-static const struct {
- int sample_rate;
- int clock;
- int n;
- int cts;
-} hdmi_aud_ncts[] = {
+static const struct hdmi_aud_ncts hdmi_aud_ncts_24bpp[] = {
{ 32000, TMDS_296M, 5824, 421875 },
{ 32000, TMDS_297M, 3072, 222750 },
{ 32000, TMDS_593M, 5824, 843750 },
@@ -184,6 +186,49 @@ static const struct {
{ 192000, TMDS_594M, 24576, 594000 },
};
+/* Appendix C - N & CTS values for deep color from HDMI 2.0 spec */
+/* HDMI N/CTS table for 10 bit deep color (30 bpp) */
+#define TMDS_371M 371250
+#define TMDS_370M 370878
+
+static const struct hdmi_aud_ncts hdmi_aud_ncts_30bpp[] = {
+ { 32000, TMDS_370M, 5824, 527344 },
+ { 32000, TMDS_371M, 6144, 556875 },
+ { 44100, TMDS_370M, 8918, 585938 },
+ { 44100, TMDS_371M, 4704, 309375 },
+ { 88200, TMDS_370M, 17836, 585938 },
+ { 88200, TMDS_371M, 9408, 309375 },
+ { 176400, TMDS_370M, 35672, 585938 },
+ { 176400, TMDS_371M, 18816, 309375 },
+ { 48000, TMDS_370M, 11648, 703125 },
+ { 48000, TMDS_371M, 5120, 309375 },
+ { 96000, TMDS_370M, 23296, 703125 },
+ { 96000, TMDS_371M, 10240, 309375 },
+ { 192000, TMDS_370M, 46592, 703125 },
+ { 192000, TMDS_371M, 20480, 309375 },
+};
+
+/* HDMI N/CTS table for 12 bit deep color (36 bpp) */
+#define TMDS_445_5M 445500
+#define TMDS_445M 445054
+
+static const struct hdmi_aud_ncts hdmi_aud_ncts_36bpp[] = {
+ { 32000, TMDS_445M, 5824, 632813 },
+ { 32000, TMDS_445_5M, 4096, 445500 },
+ { 44100, TMDS_445M, 8918, 703125 },
+ { 44100, TMDS_445_5M, 4704, 371250 },
+ { 88200, TMDS_445M, 17836, 703125 },
+ { 88200, TMDS_445_5M, 9408, 371250 },
+ { 176400, TMDS_445M, 35672, 703125 },
+ { 176400, TMDS_445_5M, 18816, 371250 },
+ { 48000, TMDS_445M, 5824, 421875 },
+ { 48000, TMDS_445_5M, 5120, 371250 },
+ { 96000, TMDS_445M, 11648, 421875 },
+ { 96000, TMDS_445_5M, 10240, 371250 },
+ { 192000, TMDS_445M, 23296, 421875 },
+ { 192000, TMDS_445_5M, 20480, 371250 },
+};
+
/* get AUD_CONFIG_PIXEL_CLOCK_HDMI_* value for mode */
static u32 audio_config_hdmi_pixel_clock(const struct intel_crtc_state *crtc_state)
{
@@ -212,14 +257,24 @@ static u32 audio_config_hdmi_pixel_clock(const struct intel_crtc_state *crtc_sta
static int audio_config_hdmi_get_n(const struct intel_crtc_state *crtc_state,
int rate)
{
- const struct drm_display_mode *adjusted_mode =
- &crtc_state->base.adjusted_mode;
- int i;
+ const struct hdmi_aud_ncts *hdmi_ncts_table;
+ int i, size;
+
+ if (crtc_state->pipe_bpp == 36) {
+ hdmi_ncts_table = hdmi_aud_ncts_36bpp;
+ size = ARRAY_SIZE(hdmi_aud_ncts_36bpp);
+ } else if (crtc_state->pipe_bpp == 30) {
+ hdmi_ncts_table = hdmi_aud_ncts_30bpp;
+ size = ARRAY_SIZE(hdmi_aud_ncts_30bpp);
+ } else {
+ hdmi_ncts_table = hdmi_aud_ncts_24bpp;
+ size = ARRAY_SIZE(hdmi_aud_ncts_24bpp);
+ }
- for (i = 0; i < ARRAY_SIZE(hdmi_aud_ncts); i++) {
- if (rate == hdmi_aud_ncts[i].sample_rate &&
- adjusted_mode->crtc_clock == hdmi_aud_ncts[i].clock) {
- return hdmi_aud_ncts[i].n;
+ for (i = 0; i < size; i++) {
+ if (rate == hdmi_ncts_table[i].sample_rate &&
+ crtc_state->port_clock == hdmi_ncts_table[i].clock) {
+ return hdmi_ncts_table[i].n;
}
}
return 0;
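A quick cross-check of the new N/CTS tables (illustrative sketch, not part of
the patch): HDMI audio clock regeneration requires 128 * f_s = f_TMDS * N / CTS,
so any table entry can be verified by recomputing CTS from N. Clock values in
the tables are in kHz.

	#include <stdio.h>

	/* CTS = f_TMDS * N / (128 * f_s), with f_TMDS in Hz */
	static long long expected_cts(long long clock_khz, long long rate_hz,
				      long long n)
	{
		return (clock_khz * 1000LL * n) / (128LL * rate_hz);
	}

	int main(void)
	{
		/* { 32000, TMDS_297M, 3072, 222750 } from hdmi_aud_ncts_24bpp */
		printf("%lld\n", expected_cts(297000, 32000, 3072)); /* 222750 */
		return 0;
	}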
diff --git a/drivers/gpu/drm/i915/display/intel_bios.c b/drivers/gpu/drm/i915/display/intel_bios.c
index 3ef4e9f573cf..efb39f350b19 100644
--- a/drivers/gpu/drm/i915/display/intel_bios.c
+++ b/drivers/gpu/drm/i915/display/intel_bios.c
@@ -28,6 +28,7 @@
#include <drm/drm_dp_helper.h>
#include <drm/i915_drm.h>
+#include "display/intel_display.h"
#include "display/intel_gmbus.h"
#include "i915_drv.h"
@@ -1342,16 +1343,13 @@ static const u8 cnp_ddc_pin_map[] = {
static const u8 icp_ddc_pin_map[] = {
[ICL_DDC_BUS_DDI_A] = GMBUS_PIN_1_BXT,
[ICL_DDC_BUS_DDI_B] = GMBUS_PIN_2_BXT,
+ [TGL_DDC_BUS_DDI_C] = GMBUS_PIN_3_BXT,
[ICL_DDC_BUS_PORT_1] = GMBUS_PIN_9_TC1_ICP,
[ICL_DDC_BUS_PORT_2] = GMBUS_PIN_10_TC2_ICP,
[ICL_DDC_BUS_PORT_3] = GMBUS_PIN_11_TC3_ICP,
[ICL_DDC_BUS_PORT_4] = GMBUS_PIN_12_TC4_ICP,
-};
-
-static const u8 mcc_ddc_pin_map[] = {
- [MCC_DDC_BUS_DDI_A] = GMBUS_PIN_1_BXT,
- [MCC_DDC_BUS_DDI_B] = GMBUS_PIN_2_BXT,
- [MCC_DDC_BUS_DDI_C] = GMBUS_PIN_9_TC1_ICP,
+ [TGL_DDC_BUS_PORT_5] = GMBUS_PIN_13_TC5_TGP,
+ [TGL_DDC_BUS_PORT_6] = GMBUS_PIN_14_TC6_TGP,
};
static u8 map_ddc_pin(struct drm_i915_private *dev_priv, u8 vbt_pin)
@@ -1359,10 +1357,7 @@ static u8 map_ddc_pin(struct drm_i915_private *dev_priv, u8 vbt_pin)
const u8 *ddc_pin_map;
int n_entries;
- if (HAS_PCH_MCC(dev_priv)) {
- ddc_pin_map = mcc_ddc_pin_map;
- n_entries = ARRAY_SIZE(mcc_ddc_pin_map);
- } else if (HAS_PCH_ICP(dev_priv)) {
+ if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP) {
ddc_pin_map = icp_ddc_pin_map;
n_entries = ARRAY_SIZE(icp_ddc_pin_map);
} else if (HAS_PCH_CNP(dev_priv)) {
@@ -1668,6 +1663,9 @@ parse_general_definitions(struct drm_i915_private *dev_priv,
if (!child->device_type)
continue;
+ DRM_DEBUG_KMS("Found VBT child device with type 0x%x\n",
+ child->device_type);
+
/*
* Copy as much as we know (sizeof) and is available
* (child_dev_size) of the child device. Accessing the data must
@@ -1730,12 +1728,13 @@ init_vbt_missing_defaults(struct drm_i915_private *dev_priv)
for (port = PORT_A; port < I915_MAX_PORTS; port++) {
struct ddi_vbt_port_info *info =
&dev_priv->vbt.ddi_port_info[port];
+ enum phy phy = intel_port_to_phy(dev_priv, port);
/*
* VBT has the TypeC mode (native, TBT/USB) and we don't want
* to detect it.
*/
- if (intel_port_is_tc(dev_priv, port))
+ if (intel_phy_is_tc(dev_priv, phy))
continue;
info->supports_dvi = (port != PORT_A && port != PORT_E);
@@ -1888,10 +1887,10 @@ out:
}
/**
- * intel_bios_cleanup - Free any resources allocated by intel_bios_init()
+ * intel_bios_driver_remove - Free any resources allocated by intel_bios_init()
* @dev_priv: i915 device instance
*/
-void intel_bios_cleanup(struct drm_i915_private *dev_priv)
+void intel_bios_driver_remove(struct drm_i915_private *dev_priv)
{
kfree(dev_priv->vbt.child_dev);
dev_priv->vbt.child_dev = NULL;
diff --git a/drivers/gpu/drm/i915/display/intel_bios.h b/drivers/gpu/drm/i915/display/intel_bios.h
index 4e42cfaf61a7..4969189e620f 100644
--- a/drivers/gpu/drm/i915/display/intel_bios.h
+++ b/drivers/gpu/drm/i915/display/intel_bios.h
@@ -42,6 +42,7 @@ enum intel_backlight_type {
INTEL_BACKLIGHT_DISPLAY_DDI,
INTEL_BACKLIGHT_DSI_DCS,
INTEL_BACKLIGHT_PANEL_DRIVER_INTERFACE,
+ INTEL_BACKLIGHT_VESA_EDP_AUX_INTERFACE,
};
struct edp_power_seq {
@@ -227,7 +228,7 @@ struct mipi_pps_data {
} __packed;
void intel_bios_init(struct drm_i915_private *dev_priv);
-void intel_bios_cleanup(struct drm_i915_private *dev_priv);
+void intel_bios_driver_remove(struct drm_i915_private *dev_priv);
bool intel_bios_is_valid_vbt(const void *buf, size_t size);
bool intel_bios_is_tv_present(struct drm_i915_private *dev_priv);
bool intel_bios_is_lvds_present(struct drm_i915_private *dev_priv, u8 *i2c_pin);
diff --git a/drivers/gpu/drm/i915/display/intel_bw.c b/drivers/gpu/drm/i915/display/intel_bw.c
index 7b908e10d32e..688858ebe4d0 100644
--- a/drivers/gpu/drm/i915/display/intel_bw.c
+++ b/drivers/gpu/drm/i915/display/intel_bw.c
@@ -6,7 +6,7 @@
#include <drm/drm_atomic_state_helper.h>
#include "intel_bw.h"
-#include "intel_drv.h"
+#include "intel_display_types.h"
#include "intel_sideband.h"
/* Parameters for Qclk Geyserville (QGV) */
@@ -65,7 +65,7 @@ static int icl_pcode_read_qgv_point_info(struct drm_i915_private *dev_priv,
struct intel_qgv_point *sp,
int point)
{
- u32 val = 0, val2;
+ u32 val = 0, val2 = 0;
int ret;
ret = sandybridge_pcode_read(dev_priv,
@@ -322,6 +322,20 @@ static unsigned int intel_bw_data_rate(struct drm_i915_private *dev_priv,
return data_rate;
}
+static struct intel_bw_state *
+intel_atomic_get_bw_state(struct intel_atomic_state *state)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct drm_private_state *bw_state;
+
+ bw_state = drm_atomic_get_private_obj_state(&state->base,
+ &dev_priv->bw_obj);
+ if (IS_ERR(bw_state))
+ return ERR_CAST(bw_state);
+
+ return to_intel_bw_state(bw_state);
+}
+
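Now that the helper lives in intel_bw.c and can return an ERR_PTR, callers
follow the usual pattern; a sketch of the intended usage (mirroring the
atomic-check flow below):

	struct intel_bw_state *bw_state;

	bw_state = intel_atomic_get_bw_state(state);
	if (IS_ERR(bw_state))
		return PTR_ERR(bw_state);
	/* ... recompute per-pipe data rates into bw_state ... */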
int intel_bw_atomic_check(struct intel_atomic_state *state)
{
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
diff --git a/drivers/gpu/drm/i915/display/intel_bw.h b/drivers/gpu/drm/i915/display/intel_bw.h
index e9d9c6d63bc3..9db10af012f4 100644
--- a/drivers/gpu/drm/i915/display/intel_bw.h
+++ b/drivers/gpu/drm/i915/display/intel_bw.h
@@ -8,7 +8,6 @@
#include <drm/drm_atomic.h>
-#include "i915_drv.h"
#include "intel_display.h"
struct drm_i915_private;
@@ -24,20 +23,6 @@ struct intel_bw_state {
#define to_intel_bw_state(x) container_of((x), struct intel_bw_state, base)
-static inline struct intel_bw_state *
-intel_atomic_get_bw_state(struct intel_atomic_state *state)
-{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
- struct drm_private_state *bw_state;
-
- bw_state = drm_atomic_get_private_obj_state(&state->base,
- &dev_priv->bw_obj);
- if (IS_ERR(bw_state))
- return ERR_CAST(bw_state);
-
- return to_intel_bw_state(bw_state);
-}
-
void intel_bw_init_hw(struct drm_i915_private *dev_priv);
int intel_bw_init(struct drm_i915_private *dev_priv);
int intel_bw_atomic_check(struct intel_atomic_state *state);
diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.c b/drivers/gpu/drm/i915/display/intel_cdclk.c
index 0d19bbd08122..d0bc42e5039c 100644
--- a/drivers/gpu/drm/i915/display/intel_cdclk.c
+++ b/drivers/gpu/drm/i915/display/intel_cdclk.c
@@ -22,7 +22,7 @@
*/
#include "intel_cdclk.h"
-#include "intel_drv.h"
+#include "intel_display_types.h"
#include "intel_sideband.h"
/**
@@ -545,10 +545,10 @@ static void vlv_set_cdclk(struct drm_i915_private *dev_priv,
/* There are cases where we can end up here with power domains
* off and a CDCLK frequency other than the minimum, like when
* issuing a modeset without actually changing any display after
- * a system suspend. So grab the PIPE-A domain, which covers
+ * a system suspend. So grab the display core domain, which covers
* the HW blocks needed for the following programming.
*/
- wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A);
+ wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_DISPLAY_CORE);
vlv_iosf_sb_get(dev_priv,
BIT(VLV_IOSF_SB_CCK) |
@@ -606,7 +606,7 @@ static void vlv_set_cdclk(struct drm_i915_private *dev_priv,
vlv_program_pfi_credits(dev_priv);
- intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A, wakeref);
+ intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
}
static void chv_set_cdclk(struct drm_i915_private *dev_priv,
@@ -631,10 +631,10 @@ static void chv_set_cdclk(struct drm_i915_private *dev_priv,
/* There are cases where we can end up here with power domains
* off and a CDCLK frequency other than the minimum, like when
* issuing a modeset without actually changing any display after
- * a system suspend. So grab the PIPE-A domain, which covers
+ * a system suspend. So grab the display core domain, which covers
* the HW blocks needed for the following programming.
*/
- wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A);
+ wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_DISPLAY_CORE);
vlv_punit_get(dev_priv);
val = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);
@@ -653,7 +653,7 @@ static void chv_set_cdclk(struct drm_i915_private *dev_priv,
vlv_program_pfi_credits(dev_priv);
- intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A, wakeref);
+ intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
}
static int bdw_calc_cdclk(int min_cdclk)
@@ -969,9 +969,7 @@ static void skl_dpll0_enable(struct drm_i915_private *dev_priv, int vco)
I915_WRITE(LCPLL1_CTL, I915_READ(LCPLL1_CTL) | LCPLL_PLL_ENABLE);
- if (intel_wait_for_register(&dev_priv->uncore,
- LCPLL1_CTL, LCPLL_PLL_LOCK, LCPLL_PLL_LOCK,
- 5))
+ if (intel_de_wait_for_set(dev_priv, LCPLL1_CTL, LCPLL_PLL_LOCK, 5))
DRM_ERROR("DPLL0 not locked\n");
dev_priv->cdclk.hw.vco = vco;
@@ -983,9 +981,7 @@ static void skl_dpll0_enable(struct drm_i915_private *dev_priv, int vco)
static void skl_dpll0_disable(struct drm_i915_private *dev_priv)
{
I915_WRITE(LCPLL1_CTL, I915_READ(LCPLL1_CTL) & ~LCPLL_PLL_ENABLE);
- if (intel_wait_for_register(&dev_priv->uncore,
- LCPLL1_CTL, LCPLL_PLL_LOCK, 0,
- 1))
+ if (intel_de_wait_for_clear(dev_priv, LCPLL1_CTL, LCPLL_PLL_LOCK, 1))
DRM_ERROR("Couldn't disable DPLL0\n");
dev_priv->cdclk.hw.vco = 0;
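For reference, the intel_de_wait_for_set()/intel_de_wait_for_clear() helpers
adopted throughout these hunks are thin wrappers around
intel_wait_for_register(); a sketch of their assumed shape (modelled on
intel_display.h):

	static inline int
	intel_de_wait_for_register(struct drm_i915_private *i915, i915_reg_t reg,
				   u32 mask, u32 value, unsigned int timeout)
	{
		return intel_wait_for_register(&i915->uncore, reg, mask, value,
					       timeout);
	}

	static inline int
	intel_de_wait_for_set(struct drm_i915_private *i915, i915_reg_t reg,
			      u32 mask, unsigned int timeout)
	{
		/* wait until every bit in @mask reads back as set */
		return intel_de_wait_for_register(i915, reg, mask, mask, timeout);
	}

	static inline int
	intel_de_wait_for_clear(struct drm_i915_private *i915, i915_reg_t reg,
				u32 mask, unsigned int timeout)
	{
		/* wait until every bit in @mask reads back as zero */
		return intel_de_wait_for_register(i915, reg, mask, 0, timeout);
	}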
@@ -1309,9 +1305,8 @@ static void bxt_de_pll_disable(struct drm_i915_private *dev_priv)
I915_WRITE(BXT_DE_PLL_ENABLE, 0);
/* Timeout 200us */
- if (intel_wait_for_register(&dev_priv->uncore,
- BXT_DE_PLL_ENABLE, BXT_DE_PLL_LOCK, 0,
- 1))
+ if (intel_de_wait_for_clear(dev_priv,
+ BXT_DE_PLL_ENABLE, BXT_DE_PLL_LOCK, 1))
DRM_ERROR("timeout waiting for DE PLL unlock\n");
dev_priv->cdclk.hw.vco = 0;
@@ -1330,11 +1325,8 @@ static void bxt_de_pll_enable(struct drm_i915_private *dev_priv, int vco)
I915_WRITE(BXT_DE_PLL_ENABLE, BXT_DE_PLL_PLL_ENABLE);
/* Timeout 200us */
- if (intel_wait_for_register(&dev_priv->uncore,
- BXT_DE_PLL_ENABLE,
- BXT_DE_PLL_LOCK,
- BXT_DE_PLL_LOCK,
- 1))
+ if (intel_de_wait_for_set(dev_priv,
+ BXT_DE_PLL_ENABLE, BXT_DE_PLL_LOCK, 1))
DRM_ERROR("timeout waiting for DE PLL lock\n");
dev_priv->cdclk.hw.vco = vco;
@@ -1756,9 +1748,10 @@ sanitize:
static int icl_calc_cdclk(int min_cdclk, unsigned int ref)
{
- int ranges_24[] = { 312000, 552000, 648000 };
- int ranges_19_38[] = { 307200, 556800, 652800 };
- int *ranges;
+ static const int ranges_24[] = { 180000, 192000, 312000, 552000, 648000 };
+ static const int ranges_19_38[] = { 172800, 192000, 307200, 556800, 652800 };
+ const int *ranges;
+ int len, i;
switch (ref) {
default:
@@ -1766,19 +1759,22 @@ static int icl_calc_cdclk(int min_cdclk, unsigned int ref)
/* fall through */
case 24000:
ranges = ranges_24;
+ len = ARRAY_SIZE(ranges_24);
break;
case 19200:
case 38400:
ranges = ranges_19_38;
+ len = ARRAY_SIZE(ranges_19_38);
break;
}
- if (min_cdclk > ranges[1])
- return ranges[2];
- else if (min_cdclk > ranges[0])
- return ranges[1];
- else
- return ranges[0];
+ for (i = 0; i < len; i++) {
+ if (min_cdclk <= ranges[i])
+ return ranges[i];
+ }
+
+ WARN_ON(min_cdclk > ranges[len - 1]);
+ return ranges[len - 1];
}
static int icl_calc_cdclk_pll_vco(struct drm_i915_private *dev_priv, int cdclk)
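The rewritten lookup picks the smallest supported CDCLK that satisfies the
requirement and clamps to the top entry otherwise; a standalone illustration
(hypothetical helper name, values from ranges_19_38):

	static int pick_cdclk(const int *ranges, int len, int min_cdclk)
	{
		int i;

		for (i = 0; i < len; i++)
			if (min_cdclk <= ranges[i])
				return ranges[i];

		return ranges[len - 1]; /* clamped; mirrors the WARN_ON path */
	}

	/*
	 * pick_cdclk(ranges_19_38, 5, 300000) == 307200
	 * pick_cdclk(ranges_19_38, 5, 180000) == 192000
	 */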
@@ -1792,16 +1788,24 @@ static int icl_calc_cdclk_pll_vco(struct drm_i915_private *dev_priv, int cdclk)
default:
MISSING_CASE(cdclk);
/* fall through */
+ case 172800:
case 307200:
case 556800:
case 652800:
WARN_ON(dev_priv->cdclk.hw.ref != 19200 &&
dev_priv->cdclk.hw.ref != 38400);
break;
+ case 180000:
case 312000:
case 552000:
case 648000:
WARN_ON(dev_priv->cdclk.hw.ref != 24000);
+ break;
+ case 192000:
+ WARN_ON(dev_priv->cdclk.hw.ref != 19200 &&
+ dev_priv->cdclk.hw.ref != 38400 &&
+ dev_priv->cdclk.hw.ref != 24000);
+ break;
}
ratio = cdclk / (dev_priv->cdclk.hw.ref / 2);
@@ -1854,14 +1858,23 @@ static void icl_set_cdclk(struct drm_i915_private *dev_priv,
dev_priv->cdclk.hw.voltage_level = cdclk_state->voltage_level;
}
-static u8 icl_calc_voltage_level(int cdclk)
+static u8 icl_calc_voltage_level(struct drm_i915_private *dev_priv, int cdclk)
{
- if (cdclk > 556800)
- return 2;
- else if (cdclk > 312000)
- return 1;
- else
- return 0;
+ if (IS_ELKHARTLAKE(dev_priv)) {
+ if (cdclk > 312000)
+ return 2;
+ else if (cdclk > 180000)
+ return 1;
+ else
+ return 0;
+ } else {
+ if (cdclk > 556800)
+ return 2;
+ else if (cdclk > 312000)
+ return 1;
+ else
+ return 0;
+ }
}
static void icl_get_cdclk(struct drm_i915_private *dev_priv,
@@ -1912,7 +1925,7 @@ out:
* at least what the CDCLK frequency requires.
*/
cdclk_state->voltage_level =
- icl_calc_voltage_level(cdclk_state->cdclk);
+ icl_calc_voltage_level(dev_priv, cdclk_state->cdclk);
}
static void icl_init_cdclk(struct drm_i915_private *dev_priv)
@@ -1947,7 +1960,8 @@ sanitize:
sanitized_state.vco = icl_calc_cdclk_pll_vco(dev_priv,
sanitized_state.cdclk);
sanitized_state.voltage_level =
- icl_calc_voltage_level(sanitized_state.cdclk);
+ icl_calc_voltage_level(dev_priv,
+ sanitized_state.cdclk);
icl_set_cdclk(dev_priv, &sanitized_state, INVALID_PIPE);
}
@@ -1958,7 +1972,8 @@ static void icl_uninit_cdclk(struct drm_i915_private *dev_priv)
cdclk_state.cdclk = cdclk_state.bypass;
cdclk_state.vco = 0;
- cdclk_state.voltage_level = icl_calc_voltage_level(cdclk_state.cdclk);
+ cdclk_state.voltage_level = icl_calc_voltage_level(dev_priv,
+ cdclk_state.cdclk);
icl_set_cdclk(dev_priv, &cdclk_state, INVALID_PIPE);
}
@@ -2560,7 +2575,7 @@ static int icl_modeset_calc_cdclk(struct intel_atomic_state *state)
state->cdclk.logical.vco = vco;
state->cdclk.logical.cdclk = cdclk;
state->cdclk.logical.voltage_level =
- max(icl_calc_voltage_level(cdclk),
+ max(icl_calc_voltage_level(dev_priv, cdclk),
cnl_compute_min_voltage_level(state));
if (!state->active_crtcs) {
@@ -2570,7 +2585,7 @@ static int icl_modeset_calc_cdclk(struct intel_atomic_state *state)
state->cdclk.actual.vco = vco;
state->cdclk.actual.cdclk = cdclk;
state->cdclk.actual.voltage_level =
- icl_calc_voltage_level(cdclk);
+ icl_calc_voltage_level(dev_priv, cdclk);
} else {
state->cdclk.actual = state->cdclk.logical;
}
@@ -2605,7 +2620,12 @@ static int intel_compute_max_dotclk(struct drm_i915_private *dev_priv)
*/
void intel_update_max_cdclk(struct drm_i915_private *dev_priv)
{
- if (INTEL_GEN(dev_priv) >= 11) {
+ if (IS_ELKHARTLAKE(dev_priv)) {
+ if (dev_priv->cdclk.hw.ref == 24000)
+ dev_priv->max_cdclk_freq = 552000;
+ else
+ dev_priv->max_cdclk_freq = 556800;
+ } else if (INTEL_GEN(dev_priv) >= 11) {
if (dev_priv->cdclk.hw.ref == 24000)
dev_priv->max_cdclk_freq = 648000;
else
diff --git a/drivers/gpu/drm/i915/display/intel_color.c b/drivers/gpu/drm/i915/display/intel_color.c
index 23a84dd7989f..71a0201437a9 100644
--- a/drivers/gpu/drm/i915/display/intel_color.c
+++ b/drivers/gpu/drm/i915/display/intel_color.c
@@ -23,7 +23,7 @@
*/
#include "intel_color.h"
-#include "intel_drv.h"
+#include "intel_display_types.h"
#define CTM_COEFF_SIGN (1ULL << 63)
diff --git a/drivers/gpu/drm/i915/display/intel_combo_phy.c b/drivers/gpu/drm/i915/display/intel_combo_phy.c
index 841708da5a56..44bbc7e74fc3 100644
--- a/drivers/gpu/drm/i915/display/intel_combo_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_combo_phy.c
@@ -4,15 +4,15 @@
*/
#include "intel_combo_phy.h"
-#include "intel_drv.h"
+#include "intel_display_types.h"
-#define for_each_combo_port(__dev_priv, __port) \
- for ((__port) = PORT_A; (__port) < I915_MAX_PORTS; (__port)++) \
- for_each_if(intel_port_is_combophy(__dev_priv, __port))
+#define for_each_combo_phy(__dev_priv, __phy) \
+ for ((__phy) = PHY_A; (__phy) < I915_MAX_PHYS; (__phy)++) \
+ for_each_if(intel_phy_is_combo(__dev_priv, __phy))
-#define for_each_combo_port_reverse(__dev_priv, __port) \
- for ((__port) = I915_MAX_PORTS; (__port)-- > PORT_A;) \
- for_each_if(intel_port_is_combophy(__dev_priv, __port))
+#define for_each_combo_phy_reverse(__dev_priv, __phy) \
+ for ((__phy) = I915_MAX_PHYS; (__phy)-- > PHY_A;) \
+ for_each_if(intel_phy_is_combo(__dev_priv, __phy))
enum {
PROCMON_0_85V_DOT_0,
@@ -38,18 +38,17 @@ static const struct cnl_procmon {
};
/*
- * CNL has just one set of registers, while ICL has two sets: one for port A and
- * the other for port B. The CNL registers are equivalent to the ICL port A
- * registers, that's why we call the ICL macros even though the function has CNL
- * on its name.
+ * CNL has just one set of registers, while gen11 has a set for each combo PHY.
+ * The CNL registers are equivalent to the gen11 PHY A registers, which is
+ * why we call the ICL macros even though the function has CNL in its name.
*/
static const struct cnl_procmon *
-cnl_get_procmon_ref_values(struct drm_i915_private *dev_priv, enum port port)
+cnl_get_procmon_ref_values(struct drm_i915_private *dev_priv, enum phy phy)
{
const struct cnl_procmon *procmon;
u32 val;
- val = I915_READ(ICL_PORT_COMP_DW3(port));
+ val = I915_READ(ICL_PORT_COMP_DW3(phy));
switch (val & (PROCESS_INFO_MASK | VOLTAGE_INFO_MASK)) {
default:
MISSING_CASE(val);
@@ -75,32 +74,32 @@ cnl_get_procmon_ref_values(struct drm_i915_private *dev_priv, enum port port)
}
static void cnl_set_procmon_ref_values(struct drm_i915_private *dev_priv,
- enum port port)
+ enum phy phy)
{
const struct cnl_procmon *procmon;
u32 val;
- procmon = cnl_get_procmon_ref_values(dev_priv, port);
+ procmon = cnl_get_procmon_ref_values(dev_priv, phy);
- val = I915_READ(ICL_PORT_COMP_DW1(port));
+ val = I915_READ(ICL_PORT_COMP_DW1(phy));
val &= ~((0xff << 16) | 0xff);
val |= procmon->dw1;
- I915_WRITE(ICL_PORT_COMP_DW1(port), val);
+ I915_WRITE(ICL_PORT_COMP_DW1(phy), val);
- I915_WRITE(ICL_PORT_COMP_DW9(port), procmon->dw9);
- I915_WRITE(ICL_PORT_COMP_DW10(port), procmon->dw10);
+ I915_WRITE(ICL_PORT_COMP_DW9(phy), procmon->dw9);
+ I915_WRITE(ICL_PORT_COMP_DW10(phy), procmon->dw10);
}
static bool check_phy_reg(struct drm_i915_private *dev_priv,
- enum port port, i915_reg_t reg, u32 mask,
+ enum phy phy, i915_reg_t reg, u32 mask,
u32 expected_val)
{
u32 val = I915_READ(reg);
if ((val & mask) != expected_val) {
- DRM_DEBUG_DRIVER("Port %c combo PHY reg %08x state mismatch: "
+ DRM_DEBUG_DRIVER("Combo PHY %c reg %08x state mismatch: "
"current %08x mask %08x expected %08x\n",
- port_name(port),
+ phy_name(phy),
reg.reg, val, mask, expected_val);
return false;
}
@@ -109,18 +108,18 @@ static bool check_phy_reg(struct drm_i915_private *dev_priv,
}
static bool cnl_verify_procmon_ref_values(struct drm_i915_private *dev_priv,
- enum port port)
+ enum phy phy)
{
const struct cnl_procmon *procmon;
bool ret;
- procmon = cnl_get_procmon_ref_values(dev_priv, port);
+ procmon = cnl_get_procmon_ref_values(dev_priv, phy);
- ret = check_phy_reg(dev_priv, port, ICL_PORT_COMP_DW1(port),
+ ret = check_phy_reg(dev_priv, phy, ICL_PORT_COMP_DW1(phy),
(0xff << 16) | 0xff, procmon->dw1);
- ret &= check_phy_reg(dev_priv, port, ICL_PORT_COMP_DW9(port),
+ ret &= check_phy_reg(dev_priv, phy, ICL_PORT_COMP_DW9(phy),
-1U, procmon->dw9);
- ret &= check_phy_reg(dev_priv, port, ICL_PORT_COMP_DW10(port),
+ ret &= check_phy_reg(dev_priv, phy, ICL_PORT_COMP_DW10(phy),
-1U, procmon->dw10);
return ret;
@@ -134,15 +133,15 @@ static bool cnl_combo_phy_enabled(struct drm_i915_private *dev_priv)
static bool cnl_combo_phy_verify_state(struct drm_i915_private *dev_priv)
{
- enum port port = PORT_A;
+ enum phy phy = PHY_A;
bool ret;
if (!cnl_combo_phy_enabled(dev_priv))
return false;
- ret = cnl_verify_procmon_ref_values(dev_priv, port);
+ ret = cnl_verify_procmon_ref_values(dev_priv, phy);
- ret &= check_phy_reg(dev_priv, port, CNL_PORT_CL1CM_DW5,
+ ret &= check_phy_reg(dev_priv, phy, CNL_PORT_CL1CM_DW5,
CL_POWER_DOWN_ENABLE, CL_POWER_DOWN_ENABLE);
return ret;
@@ -157,7 +156,7 @@ static void cnl_combo_phys_init(struct drm_i915_private *dev_priv)
I915_WRITE(CHICKEN_MISC_2, val);
/* Dummy PHY_A to get the correct CNL register from the ICL macro */
- cnl_set_procmon_ref_values(dev_priv, PORT_A);
+ cnl_set_procmon_ref_values(dev_priv, PHY_A);
val = I915_READ(CNL_PORT_COMP_DW0);
val |= COMP_INIT;
@@ -181,35 +180,39 @@ static void cnl_combo_phys_uninit(struct drm_i915_private *dev_priv)
}
static bool icl_combo_phy_enabled(struct drm_i915_private *dev_priv,
- enum port port)
+ enum phy phy)
{
- return !(I915_READ(ICL_PHY_MISC(port)) &
- ICL_PHY_MISC_DE_IO_COMP_PWR_DOWN) &&
- (I915_READ(ICL_PORT_COMP_DW0(port)) & COMP_INIT);
+ /* The PHY C added by EHL has no PHY_MISC register */
+ if (IS_ELKHARTLAKE(dev_priv) && phy == PHY_C)
+ return I915_READ(ICL_PORT_COMP_DW0(phy)) & COMP_INIT;
+ else
+ return !(I915_READ(ICL_PHY_MISC(phy)) &
+ ICL_PHY_MISC_DE_IO_COMP_PWR_DOWN) &&
+ (I915_READ(ICL_PORT_COMP_DW0(phy)) & COMP_INIT);
}
static bool icl_combo_phy_verify_state(struct drm_i915_private *dev_priv,
- enum port port)
+ enum phy phy)
{
bool ret;
- if (!icl_combo_phy_enabled(dev_priv, port))
+ if (!icl_combo_phy_enabled(dev_priv, phy))
return false;
- ret = cnl_verify_procmon_ref_values(dev_priv, port);
+ ret = cnl_verify_procmon_ref_values(dev_priv, phy);
- if (port == PORT_A)
- ret &= check_phy_reg(dev_priv, port, ICL_PORT_COMP_DW8(port),
+ if (phy == PHY_A)
+ ret &= check_phy_reg(dev_priv, phy, ICL_PORT_COMP_DW8(phy),
IREFGEN, IREFGEN);
- ret &= check_phy_reg(dev_priv, port, ICL_PORT_CL_DW5(port),
+ ret &= check_phy_reg(dev_priv, phy, ICL_PORT_CL_DW5(phy),
CL_POWER_DOWN_ENABLE, CL_POWER_DOWN_ENABLE);
return ret;
}
void intel_combo_phy_power_up_lanes(struct drm_i915_private *dev_priv,
- enum port port, bool is_dsi,
+ enum phy phy, bool is_dsi,
int lane_count, bool lane_reversal)
{
u8 lane_mask;
@@ -254,66 +257,120 @@ void intel_combo_phy_power_up_lanes(struct drm_i915_private *dev_priv,
}
}
- val = I915_READ(ICL_PORT_CL_DW10(port));
+ val = I915_READ(ICL_PORT_CL_DW10(phy));
val &= ~PWR_DOWN_LN_MASK;
val |= lane_mask << PWR_DOWN_LN_SHIFT;
- I915_WRITE(ICL_PORT_CL_DW10(port), val);
+ I915_WRITE(ICL_PORT_CL_DW10(phy), val);
+}
+
+static u32 ehl_combo_phy_a_mux(struct drm_i915_private *i915, u32 val)
+{
+ bool ddi_a_present = i915->vbt.ddi_port_info[PORT_A].child != NULL;
+ bool ddi_d_present = i915->vbt.ddi_port_info[PORT_D].child != NULL;
+ bool dsi_present = intel_bios_is_dsi_present(i915, NULL);
+
+ /*
+ * VBT's 'dvo port' field for child devices references the DDI, not
+ * the PHY. So if combo PHY A is wired up to drive an external
+ * display, we should see a child device present on PORT_D and
+ * nothing on PORT_A and no DSI.
+ */
+ if (ddi_d_present && !ddi_a_present && !dsi_present)
+ return val | ICL_PHY_MISC_MUX_DDID;
+
+ /*
+ * If we encounter a VBT that claims to have an external display on
+ * DDI-D _and_ an internal display on DDI-A/DSI, leave an error message
+ * in the log and let the internal display win.
+ */
+ if (ddi_d_present)
+ DRM_ERROR("VBT claims to have both internal and external displays on PHY A. Configuring for internal.\n");
+
+ return val & ~ICL_PHY_MISC_MUX_DDID;
}
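The mux decision above reduces to a three-input rule; condensed as a
hypothetical helper for illustration only:

	static bool ehl_phy_a_drives_ddid(bool ddi_a_present, bool ddi_d_present,
					  bool dsi_present)
	{
		/* an external display on DDI-D wins only when nothing
		 * internal is wired to PHY A */
		return ddi_d_present && !ddi_a_present && !dsi_present;
	}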
static void icl_combo_phys_init(struct drm_i915_private *dev_priv)
{
- enum port port;
+ enum phy phy;
- for_each_combo_port(dev_priv, port) {
+ for_each_combo_phy(dev_priv, phy) {
u32 val;
- if (icl_combo_phy_verify_state(dev_priv, port)) {
- DRM_DEBUG_DRIVER("Port %c combo PHY already enabled, won't reprogram it.\n",
- port_name(port));
+ if (icl_combo_phy_verify_state(dev_priv, phy)) {
+ DRM_DEBUG_DRIVER("Combo PHY %c already enabled, won't reprogram it.\n",
+ phy_name(phy));
continue;
}
- val = I915_READ(ICL_PHY_MISC(port));
+ /*
+ * Although EHL adds a combo PHY C, there's no PHY_MISC
+ * register for it and no need to program the
+ * DE_IO_COMP_PWR_DOWN setting on PHY C.
+ */
+ if (IS_ELKHARTLAKE(dev_priv) && phy == PHY_C)
+ goto skip_phy_misc;
+
+ /*
+ * EHL's combo PHY A can be hooked up to either an external
+ * display (via DDI-D) or an internal display (via DDI-A or
+ * the DSI DPHY). This is a motherboard design decision that
+ * can't be changed on the fly, so initialize the PHY's mux
+ * based on whether our VBT indicates the presence of any
+ * "internal" child devices.
+ */
+ val = I915_READ(ICL_PHY_MISC(phy));
+ if (IS_ELKHARTLAKE(dev_priv) && phy == PHY_A)
+ val = ehl_combo_phy_a_mux(dev_priv, val);
val &= ~ICL_PHY_MISC_DE_IO_COMP_PWR_DOWN;
- I915_WRITE(ICL_PHY_MISC(port), val);
+ I915_WRITE(ICL_PHY_MISC(phy), val);
- cnl_set_procmon_ref_values(dev_priv, port);
+skip_phy_misc:
+ cnl_set_procmon_ref_values(dev_priv, phy);
- if (port == PORT_A) {
- val = I915_READ(ICL_PORT_COMP_DW8(port));
+ if (phy == PHY_A) {
+ val = I915_READ(ICL_PORT_COMP_DW8(phy));
val |= IREFGEN;
- I915_WRITE(ICL_PORT_COMP_DW8(port), val);
+ I915_WRITE(ICL_PORT_COMP_DW8(phy), val);
}
- val = I915_READ(ICL_PORT_COMP_DW0(port));
+ val = I915_READ(ICL_PORT_COMP_DW0(phy));
val |= COMP_INIT;
- I915_WRITE(ICL_PORT_COMP_DW0(port), val);
+ I915_WRITE(ICL_PORT_COMP_DW0(phy), val);
- val = I915_READ(ICL_PORT_CL_DW5(port));
+ val = I915_READ(ICL_PORT_CL_DW5(phy));
val |= CL_POWER_DOWN_ENABLE;
- I915_WRITE(ICL_PORT_CL_DW5(port), val);
+ I915_WRITE(ICL_PORT_CL_DW5(phy), val);
}
}
static void icl_combo_phys_uninit(struct drm_i915_private *dev_priv)
{
- enum port port;
+ enum phy phy;
- for_each_combo_port_reverse(dev_priv, port) {
+ for_each_combo_phy_reverse(dev_priv, phy) {
u32 val;
- if (port == PORT_A &&
- !icl_combo_phy_verify_state(dev_priv, port))
- DRM_WARN("Port %c combo PHY HW state changed unexpectedly\n",
- port_name(port));
+ if (phy == PHY_A &&
+ !icl_combo_phy_verify_state(dev_priv, phy))
+ DRM_WARN("Combo PHY %c HW state changed unexpectedly\n",
+ phy_name(phy));
+
+ /*
+ * Although EHL adds a combo PHY C, there's no PHY_MISC
+ * register for it and no need to program the
+ * DE_IO_COMP_PWR_DOWN setting on PHY C.
+ */
+ if (IS_ELKHARTLAKE(dev_priv) && phy == PHY_C)
+ goto skip_phy_misc;
- val = I915_READ(ICL_PHY_MISC(port));
+ val = I915_READ(ICL_PHY_MISC(phy));
val |= ICL_PHY_MISC_DE_IO_COMP_PWR_DOWN;
- I915_WRITE(ICL_PHY_MISC(port), val);
+ I915_WRITE(ICL_PHY_MISC(phy), val);
- val = I915_READ(ICL_PORT_COMP_DW0(port));
+skip_phy_misc:
+ val = I915_READ(ICL_PORT_COMP_DW0(phy));
val &= ~COMP_INIT;
- I915_WRITE(ICL_PORT_COMP_DW0(port), val);
+ I915_WRITE(ICL_PORT_COMP_DW0(phy), val);
}
}
diff --git a/drivers/gpu/drm/i915/display/intel_combo_phy.h b/drivers/gpu/drm/i915/display/intel_combo_phy.h
index e6e195a83b19..660886f86c59 100644
--- a/drivers/gpu/drm/i915/display/intel_combo_phy.h
+++ b/drivers/gpu/drm/i915/display/intel_combo_phy.h
@@ -7,14 +7,14 @@
#define __INTEL_COMBO_PHY_H__
#include <linux/types.h>
-#include <drm/i915_drm.h>
struct drm_i915_private;
+enum phy;
void intel_combo_phy_init(struct drm_i915_private *dev_priv);
void intel_combo_phy_uninit(struct drm_i915_private *dev_priv);
void intel_combo_phy_power_up_lanes(struct drm_i915_private *dev_priv,
- enum port port, bool is_dsi,
+ enum phy phy, bool is_dsi,
int lane_count, bool lane_reversal);
#endif /* __INTEL_COMBO_PHY_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_connector.c b/drivers/gpu/drm/i915/display/intel_connector.c
index 41310f8e5a2a..308ec63207ee 100644
--- a/drivers/gpu/drm/i915/display/intel_connector.c
+++ b/drivers/gpu/drm/i915/display/intel_connector.c
@@ -33,7 +33,7 @@
#include "i915_drv.h"
#include "intel_connector.h"
-#include "intel_drv.h"
+#include "intel_display_types.h"
#include "intel_hdcp.h"
int intel_connector_init(struct intel_connector *connector)
@@ -118,7 +118,7 @@ int intel_connector_register(struct drm_connector *connector)
if (ret)
goto err;
- if (i915_inject_load_failure()) {
+ if (i915_inject_probe_failure(to_i915(connector->dev))) {
ret = -EFAULT;
goto err_backlight;
}
diff --git a/drivers/gpu/drm/i915/display/intel_crt.c b/drivers/gpu/drm/i915/display/intel_crt.c
index 3fcf2f84bcce..e6e8d4a82044 100644
--- a/drivers/gpu/drm/i915/display/intel_crt.c
+++ b/drivers/gpu/drm/i915/display/intel_crt.c
@@ -38,7 +38,7 @@
#include "intel_connector.h"
#include "intel_crt.h"
#include "intel_ddi.h"
-#include "intel_drv.h"
+#include "intel_display_types.h"
#include "intel_fifo_underrun.h"
#include "intel_gmbus.h"
#include "intel_hotplug.h"
@@ -443,9 +443,9 @@ static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector)
I915_WRITE(crt->adpa_reg, adpa);
- if (intel_wait_for_register(&dev_priv->uncore,
+ if (intel_de_wait_for_clear(dev_priv,
crt->adpa_reg,
- ADPA_CRT_HOTPLUG_FORCE_TRIGGER, 0,
+ ADPA_CRT_HOTPLUG_FORCE_TRIGGER,
1000))
DRM_DEBUG_KMS("timed out waiting for FORCE_TRIGGER");
@@ -497,10 +497,8 @@ static bool valleyview_crt_detect_hotplug(struct drm_connector *connector)
I915_WRITE(crt->adpa_reg, adpa);
- if (intel_wait_for_register(&dev_priv->uncore,
- crt->adpa_reg,
- ADPA_CRT_HOTPLUG_FORCE_TRIGGER, 0,
- 1000)) {
+ if (intel_de_wait_for_clear(dev_priv, crt->adpa_reg,
+ ADPA_CRT_HOTPLUG_FORCE_TRIGGER, 1000)) {
DRM_DEBUG_KMS("timed out waiting for FORCE_TRIGGER");
I915_WRITE(crt->adpa_reg, save_adpa);
}
@@ -550,9 +548,8 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector)
CRT_HOTPLUG_FORCE_DETECT,
CRT_HOTPLUG_FORCE_DETECT);
/* wait for FORCE_DETECT to go off */
- if (intel_wait_for_register(&dev_priv->uncore, PORT_HOTPLUG_EN,
- CRT_HOTPLUG_FORCE_DETECT, 0,
- 1000))
+ if (intel_de_wait_for_clear(dev_priv, PORT_HOTPLUG_EN,
+ CRT_HOTPLUG_FORCE_DETECT, 1000))
DRM_DEBUG_KMS("timed out waiting for FORCE_DETECT to go off");
}
diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c
index 1cb1fa74cfbc..8eb2b3ec01ed 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi.c
+++ b/drivers/gpu/drm/i915/display/intel_ddi.c
@@ -32,10 +32,10 @@
#include "intel_combo_phy.h"
#include "intel_connector.h"
#include "intel_ddi.h"
+#include "intel_display_types.h"
#include "intel_dp.h"
#include "intel_dp_link_training.h"
#include "intel_dpio_phy.h"
-#include "intel_drv.h"
#include "intel_dsi.h"
#include "intel_fifo_underrun.h"
#include "intel_gmbus.h"
@@ -45,6 +45,7 @@
#include "intel_lspcon.h"
#include "intel_panel.h"
#include "intel_psr.h"
+#include "intel_tc.h"
#include "intel_vdsc.h"
struct ddi_buf_trans {
@@ -846,8 +847,8 @@ cnl_get_buf_trans_edp(struct drm_i915_private *dev_priv, int *n_entries)
}
static const struct cnl_ddi_buf_trans *
-icl_get_combo_buf_trans(struct drm_i915_private *dev_priv, enum port port,
- int type, int rate, int *n_entries)
+icl_get_combo_buf_trans(struct drm_i915_private *dev_priv, int type, int rate,
+ int *n_entries)
{
if (type == INTEL_OUTPUT_HDMI) {
*n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_hdmi);
@@ -867,12 +868,13 @@ icl_get_combo_buf_trans(struct drm_i915_private *dev_priv, enum port port,
static int intel_ddi_hdmi_level(struct drm_i915_private *dev_priv, enum port port)
{
int n_entries, level, default_entry;
+ enum phy phy = intel_port_to_phy(dev_priv, port);
level = dev_priv->vbt.ddi_port_info[port].hdmi_level_shift;
if (INTEL_GEN(dev_priv) >= 11) {
- if (intel_port_is_combophy(dev_priv, port))
- icl_get_combo_buf_trans(dev_priv, port, INTEL_OUTPUT_HDMI,
+ if (intel_phy_is_combo(dev_priv, phy))
+ icl_get_combo_buf_trans(dev_priv, INTEL_OUTPUT_HDMI,
0, &n_entries);
else
n_entries = ARRAY_SIZE(icl_mg_phy_ddi_translations);
@@ -1486,9 +1488,10 @@ static void icl_ddi_clock_get(struct intel_encoder *encoder,
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dpll_hw_state *pll_state = &pipe_config->dpll_hw_state;
enum port port = encoder->port;
+ enum phy phy = intel_port_to_phy(dev_priv, port);
int link_clock;
- if (intel_port_is_combophy(dev_priv, port)) {
+ if (intel_phy_is_combo(dev_priv, phy)) {
link_clock = cnl_calc_wrpll_link(dev_priv, pll_state);
} else {
enum intel_dpll_id pll_id = intel_get_shared_dpll_id(dev_priv,
@@ -1770,7 +1773,10 @@ void intel_ddi_enable_transcoder_func(const struct intel_crtc_state *crtc_state)
/* Enable TRANS_DDI_FUNC_CTL for the pipe to work in HDMI mode */
temp = TRANS_DDI_FUNC_ENABLE;
- temp |= TRANS_DDI_SELECT_PORT(port);
+ if (INTEL_GEN(dev_priv) >= 12)
+ temp |= TGL_TRANS_DDI_SELECT_PORT(port);
+ else
+ temp |= TRANS_DDI_SELECT_PORT(port);
switch (crtc_state->pipe_bpp) {
case 18:
@@ -1850,8 +1856,13 @@ void intel_ddi_disable_transcoder_func(const struct intel_crtc_state *crtc_state
i915_reg_t reg = TRANS_DDI_FUNC_CTL(cpu_transcoder);
u32 val = I915_READ(reg);
- val &= ~(TRANS_DDI_FUNC_ENABLE | TRANS_DDI_PORT_MASK | TRANS_DDI_DP_VC_PAYLOAD_ALLOC);
- val |= TRANS_DDI_PORT_NONE;
+ if (INTEL_GEN(dev_priv) >= 12) {
+ val &= ~(TRANS_DDI_FUNC_ENABLE | TGL_TRANS_DDI_PORT_MASK |
+ TRANS_DDI_DP_VC_PAYLOAD_ALLOC);
+ } else {
+ val &= ~(TRANS_DDI_FUNC_ENABLE | TRANS_DDI_PORT_MASK |
+ TRANS_DDI_DP_VC_PAYLOAD_ALLOC);
+ }
I915_WRITE(reg, val);
if (dev_priv->quirks & QUIRK_INCREASE_DDI_DISABLED_TIME &&
@@ -2003,10 +2014,27 @@ static void intel_ddi_get_encoder_pipes(struct intel_encoder *encoder,
mst_pipe_mask = 0;
for_each_pipe(dev_priv, p) {
enum transcoder cpu_transcoder = (enum transcoder)p;
+ unsigned int port_mask, ddi_select;
+ intel_wakeref_t trans_wakeref;
+
+ trans_wakeref = intel_display_power_get_if_enabled(dev_priv,
+ POWER_DOMAIN_TRANSCODER(cpu_transcoder));
+ if (!trans_wakeref)
+ continue;
+
+ if (INTEL_GEN(dev_priv) >= 12) {
+ port_mask = TGL_TRANS_DDI_PORT_MASK;
+ ddi_select = TGL_TRANS_DDI_SELECT_PORT(port);
+ } else {
+ port_mask = TRANS_DDI_PORT_MASK;
+ ddi_select = TRANS_DDI_SELECT_PORT(port);
+ }
tmp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
+ intel_display_power_put(dev_priv, POWER_DOMAIN_TRANSCODER(cpu_transcoder),
+ trans_wakeref);
- if ((tmp & TRANS_DDI_PORT_MASK) != TRANS_DDI_SELECT_PORT(port))
+ if ((tmp & port_mask) != ddi_select)
continue;
if ((tmp & TRANS_DDI_MODE_SELECT_MASK) ==
@@ -2085,6 +2113,7 @@ static void intel_ddi_get_power_domains(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_digital_port *dig_port;
+ enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
/*
* TODO: Add support for MST encoders. Atm, the following should never
@@ -2102,7 +2131,7 @@ static void intel_ddi_get_power_domains(struct intel_encoder *encoder,
* ports.
*/
if (intel_crtc_has_dp_encoder(crtc_state) ||
- intel_port_is_tc(dev_priv, encoder->port))
+ intel_phy_is_tc(dev_priv, phy))
intel_display_power_get(dev_priv,
intel_ddi_main_link_aux_domain(dig_port));
@@ -2122,9 +2151,14 @@ void intel_ddi_enable_pipe_clock(const struct intel_crtc_state *crtc_state)
enum port port = encoder->port;
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
- if (cpu_transcoder != TRANSCODER_EDP)
- I915_WRITE(TRANS_CLK_SEL(cpu_transcoder),
- TRANS_CLK_SEL_PORT(port));
+ if (cpu_transcoder != TRANSCODER_EDP) {
+ if (INTEL_GEN(dev_priv) >= 12)
+ I915_WRITE(TRANS_CLK_SEL(cpu_transcoder),
+ TGL_TRANS_CLK_SEL_PORT(port));
+ else
+ I915_WRITE(TRANS_CLK_SEL(cpu_transcoder),
+ TRANS_CLK_SEL_PORT(port));
+ }
}
void intel_ddi_disable_pipe_clock(const struct intel_crtc_state *crtc_state)
@@ -2132,9 +2166,14 @@ void intel_ddi_disable_pipe_clock(const struct intel_crtc_state *crtc_state)
struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
- if (cpu_transcoder != TRANSCODER_EDP)
- I915_WRITE(TRANS_CLK_SEL(cpu_transcoder),
- TRANS_CLK_SEL_DISABLED);
+ if (cpu_transcoder != TRANSCODER_EDP) {
+ if (INTEL_GEN(dev_priv) >= 12)
+ I915_WRITE(TRANS_CLK_SEL(cpu_transcoder),
+ TGL_TRANS_CLK_SEL_DISABLED);
+ else
+ I915_WRITE(TRANS_CLK_SEL(cpu_transcoder),
+ TRANS_CLK_SEL_DISABLED);
+ }
}
static void _skl_ddi_set_iboost(struct drm_i915_private *dev_priv,
@@ -2227,11 +2266,12 @@ u8 intel_ddi_dp_voltage_max(struct intel_encoder *encoder)
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
enum port port = encoder->port;
+ enum phy phy = intel_port_to_phy(dev_priv, port);
int n_entries;
if (INTEL_GEN(dev_priv) >= 11) {
- if (intel_port_is_combophy(dev_priv, port))
- icl_get_combo_buf_trans(dev_priv, port, encoder->type,
+ if (intel_phy_is_combo(dev_priv, phy))
+ icl_get_combo_buf_trans(dev_priv, encoder->type,
intel_dp->link_rate, &n_entries);
else
n_entries = ARRAY_SIZE(icl_mg_phy_ddi_translations);
@@ -2413,15 +2453,15 @@ static void cnl_ddi_vswing_sequence(struct intel_encoder *encoder,
}
static void icl_ddi_combo_vswing_program(struct drm_i915_private *dev_priv,
- u32 level, enum port port, int type,
+ u32 level, enum phy phy, int type,
int rate)
{
const struct cnl_ddi_buf_trans *ddi_translations = NULL;
u32 n_entries, val;
int ln;
- ddi_translations = icl_get_combo_buf_trans(dev_priv, port, type,
- rate, &n_entries);
+ ddi_translations = icl_get_combo_buf_trans(dev_priv, type, rate,
+ &n_entries);
if (!ddi_translations)
return;
@@ -2431,41 +2471,41 @@ static void icl_ddi_combo_vswing_program(struct drm_i915_private *dev_priv,
}
/* Set PORT_TX_DW5 */
- val = I915_READ(ICL_PORT_TX_DW5_LN0(port));
+ val = I915_READ(ICL_PORT_TX_DW5_LN0(phy));
val &= ~(SCALING_MODE_SEL_MASK | RTERM_SELECT_MASK |
TAP2_DISABLE | TAP3_DISABLE);
val |= SCALING_MODE_SEL(0x2);
val |= RTERM_SELECT(0x6);
val |= TAP3_DISABLE;
- I915_WRITE(ICL_PORT_TX_DW5_GRP(port), val);
+ I915_WRITE(ICL_PORT_TX_DW5_GRP(phy), val);
/* Program PORT_TX_DW2 */
- val = I915_READ(ICL_PORT_TX_DW2_LN0(port));
+ val = I915_READ(ICL_PORT_TX_DW2_LN0(phy));
val &= ~(SWING_SEL_LOWER_MASK | SWING_SEL_UPPER_MASK |
RCOMP_SCALAR_MASK);
val |= SWING_SEL_UPPER(ddi_translations[level].dw2_swing_sel);
val |= SWING_SEL_LOWER(ddi_translations[level].dw2_swing_sel);
/* Program Rcomp scalar for every table entry */
val |= RCOMP_SCALAR(0x98);
- I915_WRITE(ICL_PORT_TX_DW2_GRP(port), val);
+ I915_WRITE(ICL_PORT_TX_DW2_GRP(phy), val);
/* Program PORT_TX_DW4 */
/* We cannot write to GRP. It would overwrite individual loadgen. */
for (ln = 0; ln <= 3; ln++) {
- val = I915_READ(ICL_PORT_TX_DW4_LN(ln, port));
+ val = I915_READ(ICL_PORT_TX_DW4_LN(ln, phy));
val &= ~(POST_CURSOR_1_MASK | POST_CURSOR_2_MASK |
CURSOR_COEFF_MASK);
val |= POST_CURSOR_1(ddi_translations[level].dw4_post_cursor_1);
val |= POST_CURSOR_2(ddi_translations[level].dw4_post_cursor_2);
val |= CURSOR_COEFF(ddi_translations[level].dw4_cursor_coeff);
- I915_WRITE(ICL_PORT_TX_DW4_LN(ln, port), val);
+ I915_WRITE(ICL_PORT_TX_DW4_LN(ln, phy), val);
}
/* Program PORT_TX_DW7 */
- val = I915_READ(ICL_PORT_TX_DW7_LN0(port));
+ val = I915_READ(ICL_PORT_TX_DW7_LN0(phy));
val &= ~N_SCALAR_MASK;
val |= N_SCALAR(ddi_translations[level].dw7_n_scalar);
- I915_WRITE(ICL_PORT_TX_DW7_GRP(port), val);
+ I915_WRITE(ICL_PORT_TX_DW7_GRP(phy), val);
}
static void icl_combo_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
@@ -2473,7 +2513,7 @@ static void icl_combo_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
enum intel_output_type type)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- enum port port = encoder->port;
+ enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
int width = 0;
int rate = 0;
u32 val;
@@ -2494,12 +2534,12 @@ static void icl_combo_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
* set PORT_PCS_DW1 cmnkeeper_enable to 1b,
* else clear to 0b.
*/
- val = I915_READ(ICL_PORT_PCS_DW1_LN0(port));
+ val = I915_READ(ICL_PORT_PCS_DW1_LN0(phy));
if (type == INTEL_OUTPUT_HDMI)
val &= ~COMMON_KEEPER_EN;
else
val |= COMMON_KEEPER_EN;
- I915_WRITE(ICL_PORT_PCS_DW1_GRP(port), val);
+ I915_WRITE(ICL_PORT_PCS_DW1_GRP(phy), val);
/* 2. Program loadgen select */
/*
@@ -2509,33 +2549,33 @@ static void icl_combo_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
* > 6 GHz (LN0=0, LN1=0, LN2=0, LN3=0)
*/
for (ln = 0; ln <= 3; ln++) {
- val = I915_READ(ICL_PORT_TX_DW4_LN(ln, port));
+ val = I915_READ(ICL_PORT_TX_DW4_LN(ln, phy));
val &= ~LOADGEN_SELECT;
if ((rate <= 600000 && width == 4 && ln >= 1) ||
(rate <= 600000 && width < 4 && (ln == 1 || ln == 2))) {
val |= LOADGEN_SELECT;
}
- I915_WRITE(ICL_PORT_TX_DW4_LN(ln, port), val);
+ I915_WRITE(ICL_PORT_TX_DW4_LN(ln, phy), val);
}
/* 3. Set PORT_CL_DW5 SUS Clock Config to 11b */
- val = I915_READ(ICL_PORT_CL_DW5(port));
+ val = I915_READ(ICL_PORT_CL_DW5(phy));
val |= SUS_CLOCK_CONFIG;
- I915_WRITE(ICL_PORT_CL_DW5(port), val);
+ I915_WRITE(ICL_PORT_CL_DW5(phy), val);
/* 4. Clear training enable to change swing values */
- val = I915_READ(ICL_PORT_TX_DW5_LN0(port));
+ val = I915_READ(ICL_PORT_TX_DW5_LN0(phy));
val &= ~TX_TRAINING_EN;
- I915_WRITE(ICL_PORT_TX_DW5_GRP(port), val);
+ I915_WRITE(ICL_PORT_TX_DW5_GRP(phy), val);
/* 5. Program swing and de-emphasis */
- icl_ddi_combo_vswing_program(dev_priv, level, port, type, rate);
+ icl_ddi_combo_vswing_program(dev_priv, level, phy, type, rate);
/* 6. Set training enable to trigger update */
- val = I915_READ(ICL_PORT_TX_DW5_LN0(port));
+ val = I915_READ(ICL_PORT_TX_DW5_LN0(phy));
val |= TX_TRAINING_EN;
- I915_WRITE(ICL_PORT_TX_DW5_GRP(port), val);
+ I915_WRITE(ICL_PORT_TX_DW5_GRP(phy), val);
}
static void icl_mg_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
@@ -2663,9 +2703,9 @@ static void icl_ddi_vswing_sequence(struct intel_encoder *encoder,
enum intel_output_type type)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- enum port port = encoder->port;
+ enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
- if (intel_port_is_combophy(dev_priv, port))
+ if (intel_phy_is_combo(dev_priv, phy))
icl_combo_phy_ddi_vswing_sequence(encoder, level, type);
else
icl_mg_phy_ddi_vswing_sequence(encoder, link_clock, level);
@@ -2728,12 +2768,13 @@ u32 ddi_signal_levels(struct intel_dp *intel_dp)
static inline
u32 icl_dpclka_cfgcr0_clk_off(struct drm_i915_private *dev_priv,
- enum port port)
+ enum phy phy)
{
- if (intel_port_is_combophy(dev_priv, port)) {
- return ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(port);
- } else if (intel_port_is_tc(dev_priv, port)) {
- enum tc_port tc_port = intel_port_to_tc(dev_priv, port);
+ if (intel_phy_is_combo(dev_priv, phy)) {
+ return ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy);
+ } else if (intel_phy_is_tc(dev_priv, phy)) {
+ enum tc_port tc_port = intel_port_to_tc(dev_priv,
+ (enum port)phy);
return ICL_DPCLKA_CFGCR0_TC_CLK_OFF(tc_port);
}
@@ -2746,23 +2787,33 @@ static void icl_map_plls_to_ports(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_shared_dpll *pll = crtc_state->shared_dpll;
- enum port port = encoder->port;
+ enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
u32 val;
mutex_lock(&dev_priv->dpll_lock);
- val = I915_READ(DPCLKA_CFGCR0_ICL);
- WARN_ON((val & icl_dpclka_cfgcr0_clk_off(dev_priv, port)) == 0);
+ val = I915_READ(ICL_DPCLKA_CFGCR0);
+ WARN_ON((val & icl_dpclka_cfgcr0_clk_off(dev_priv, phy)) == 0);
- if (intel_port_is_combophy(dev_priv, port)) {
- val &= ~DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port);
- val |= DPCLKA_CFGCR0_DDI_CLK_SEL(pll->info->id, port);
- I915_WRITE(DPCLKA_CFGCR0_ICL, val);
- POSTING_READ(DPCLKA_CFGCR0_ICL);
+ if (intel_phy_is_combo(dev_priv, phy)) {
+ /*
+ * Even though this register references DDIs, note that we
+ * want to pass the PHY rather than the port (DDI). For
+ * ICL, port=phy in all cases so it doesn't matter, but for
+ * EHL the bspec notes the following:
+ *
+ * "DDID clock tied to DDIA clock, so DPCLKA_CFGCR0 DDIA
+ * Clock Select chooses the PLL for both DDIA and DDID and
+ * drives port A in all cases."
+ */
+ val &= ~ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy);
+ val |= ICL_DPCLKA_CFGCR0_DDI_CLK_SEL(pll->info->id, phy);
+ I915_WRITE(ICL_DPCLKA_CFGCR0, val);
+ POSTING_READ(ICL_DPCLKA_CFGCR0);
}
- val &= ~icl_dpclka_cfgcr0_clk_off(dev_priv, port);
- I915_WRITE(DPCLKA_CFGCR0_ICL, val);
+ val &= ~icl_dpclka_cfgcr0_clk_off(dev_priv, phy);
+ I915_WRITE(ICL_DPCLKA_CFGCR0, val);
mutex_unlock(&dev_priv->dpll_lock);
}
@@ -2770,14 +2821,14 @@ static void icl_map_plls_to_ports(struct intel_encoder *encoder,
static void icl_unmap_plls_to_ports(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- enum port port = encoder->port;
+ enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
u32 val;
mutex_lock(&dev_priv->dpll_lock);
- val = I915_READ(DPCLKA_CFGCR0_ICL);
- val |= icl_dpclka_cfgcr0_clk_off(dev_priv, port);
- I915_WRITE(DPCLKA_CFGCR0_ICL, val);
+ val = I915_READ(ICL_DPCLKA_CFGCR0);
+ val |= icl_dpclka_cfgcr0_clk_off(dev_priv, phy);
+ I915_WRITE(ICL_DPCLKA_CFGCR0, val);
mutex_unlock(&dev_priv->dpll_lock);
}
@@ -2835,11 +2886,13 @@ void icl_sanitize_encoder_pll_mapping(struct intel_encoder *encoder)
ddi_clk_needed = false;
}
- val = I915_READ(DPCLKA_CFGCR0_ICL);
+ val = I915_READ(ICL_DPCLKA_CFGCR0);
for_each_port_masked(port, port_mask) {
+ enum phy phy = intel_port_to_phy(dev_priv, port);
+
bool ddi_clk_ungated = !(val &
icl_dpclka_cfgcr0_clk_off(dev_priv,
- port));
+ phy));
if (ddi_clk_needed == ddi_clk_ungated)
continue;
@@ -2851,10 +2904,10 @@ void icl_sanitize_encoder_pll_mapping(struct intel_encoder *encoder)
if (WARN_ON(ddi_clk_needed))
continue;
- DRM_NOTE("Port %c is disabled/in DSI mode with an ungated DDI clock, gate it\n",
- port_name(port));
- val |= icl_dpclka_cfgcr0_clk_off(dev_priv, port);
- I915_WRITE(DPCLKA_CFGCR0_ICL, val);
+ DRM_NOTE("PHY %c is disabled/in DSI mode with an ungated DDI clock, gate it\n",
+ phy_name(phy));
+ val |= icl_dpclka_cfgcr0_clk_off(dev_priv, phy);
+ I915_WRITE(ICL_DPCLKA_CFGCR0, val);
}
}
@@ -2863,6 +2916,7 @@ static void intel_ddi_clk_select(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum port port = encoder->port;
+ enum phy phy = intel_port_to_phy(dev_priv, port);
u32 val;
const struct intel_shared_dpll *pll = crtc_state->shared_dpll;
@@ -2872,9 +2926,15 @@ static void intel_ddi_clk_select(struct intel_encoder *encoder,
mutex_lock(&dev_priv->dpll_lock);
if (INTEL_GEN(dev_priv) >= 11) {
- if (!intel_port_is_combophy(dev_priv, port))
+ if (!intel_phy_is_combo(dev_priv, phy))
I915_WRITE(DDI_CLK_SEL(port),
icl_pll_to_ddi_clk_sel(encoder, crtc_state));
+ else if (IS_ELKHARTLAKE(dev_priv) && port >= PORT_C)
+ /*
+ * MG does not exist, but the programming is still required
+ * to ungate DDIC and DDID.
+ */
+ I915_WRITE(DDI_CLK_SEL(port), DDI_CLK_SEL_MG);
} else if (IS_CANNONLAKE(dev_priv)) {
/* Configure DPCLKA_CFGCR0 to map the DPLL to the DDI. */
val = I915_READ(DPCLKA_CFGCR0);
@@ -2912,9 +2972,11 @@ static void intel_ddi_clk_disable(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum port port = encoder->port;
+ enum phy phy = intel_port_to_phy(dev_priv, port);
if (INTEL_GEN(dev_priv) >= 11) {
- if (!intel_port_is_combophy(dev_priv, port))
+ if (!intel_phy_is_combo(dev_priv, phy) ||
+ (IS_ELKHARTLAKE(dev_priv) && port >= PORT_C))
I915_WRITE(DDI_CLK_SEL(port), DDI_CLK_SEL_NONE);
} else if (IS_CANNONLAKE(dev_priv)) {
I915_WRITE(DPCLKA_CFGCR0, I915_READ(DPCLKA_CFGCR0) |
@@ -2995,25 +3057,22 @@ static void icl_program_mg_dp_mode(struct intel_digital_port *intel_dig_port)
{
struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
enum port port = intel_dig_port->base.port;
- enum tc_port tc_port = intel_port_to_tc(dev_priv, port);
- u32 ln0, ln1, lane_info;
+ u32 ln0, ln1, lane_mask;
- if (tc_port == PORT_TC_NONE || intel_dig_port->tc_type == TC_PORT_TBT)
+ if (intel_dig_port->tc_mode == TC_PORT_TBT_ALT)
return;
ln0 = I915_READ(MG_DP_MODE(0, port));
ln1 = I915_READ(MG_DP_MODE(1, port));
- switch (intel_dig_port->tc_type) {
- case TC_PORT_TYPEC:
+ switch (intel_dig_port->tc_mode) {
+ case TC_PORT_DP_ALT:
ln0 &= ~(MG_DP_MODE_CFG_DP_X1_MODE | MG_DP_MODE_CFG_DP_X2_MODE);
ln1 &= ~(MG_DP_MODE_CFG_DP_X1_MODE | MG_DP_MODE_CFG_DP_X2_MODE);
- lane_info = (I915_READ(PORT_TX_DFLEXDPSP) &
- DP_LANE_ASSIGNMENT_MASK(tc_port)) >>
- DP_LANE_ASSIGNMENT_SHIFT(tc_port);
+ lane_mask = intel_tc_port_get_lane_mask(intel_dig_port);
- switch (lane_info) {
+ switch (lane_mask) {
case 0x1:
case 0x4:
break;
@@ -3038,7 +3097,7 @@ static void icl_program_mg_dp_mode(struct intel_digital_port *intel_dig_port)
MG_DP_MODE_CFG_DP_X2_MODE;
break;
default:
- MISSING_CASE(lane_info);
+ MISSING_CASE(lane_mask);
}
break;
@@ -3048,7 +3107,7 @@ static void icl_program_mg_dp_mode(struct intel_digital_port *intel_dig_port)
break;
default:
- MISSING_CASE(intel_dig_port->tc_type);
+ MISSING_CASE(intel_dig_port->tc_mode);
return;
}
@@ -3080,10 +3139,8 @@ static void intel_ddi_enable_fec(struct intel_encoder *encoder,
val |= DP_TP_CTL_FEC_ENABLE;
I915_WRITE(DP_TP_CTL(port), val);
- if (intel_wait_for_register(&dev_priv->uncore, DP_TP_STATUS(port),
- DP_TP_STATUS_FEC_ENABLE_LIVE,
- DP_TP_STATUS_FEC_ENABLE_LIVE,
- 1))
+ if (intel_de_wait_for_set(dev_priv, DP_TP_STATUS(port),
+ DP_TP_STATUS_FEC_ENABLE_LIVE, 1))
DRM_ERROR("Timed out waiting for FEC Enable Status\n");
}
@@ -3110,6 +3167,7 @@ static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder,
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum port port = encoder->port;
+ enum phy phy = intel_port_to_phy(dev_priv, port);
struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
bool is_mst = intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST);
int level = intel_ddi_dp_level(intel_dp);
@@ -3123,7 +3181,10 @@ static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder,
intel_ddi_clk_select(encoder, crtc_state);
- intel_display_power_get(dev_priv, dig_port->ddi_io_power_domain);
+ if (!intel_phy_is_tc(dev_priv, phy) ||
+ dig_port->tc_mode != TC_PORT_TBT_ALT)
+ intel_display_power_get(dev_priv,
+ dig_port->ddi_io_power_domain);
icl_program_mg_dp_mode(dig_port);
icl_disable_phy_clock_gating(dig_port);
@@ -3138,11 +3199,11 @@ static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder,
else
intel_prepare_dp_ddi_buffers(encoder, crtc_state);
- if (intel_port_is_combophy(dev_priv, port)) {
+ if (intel_phy_is_combo(dev_priv, phy)) {
bool lane_reversal =
dig_port->saved_port_bits & DDI_BUF_PORT_REVERSAL;
- intel_combo_phy_power_up_lanes(dev_priv, port, false,
+ intel_combo_phy_power_up_lanes(dev_priv, phy, false,
crtc_state->lane_count,
lane_reversal);
}
@@ -3290,6 +3351,7 @@ static void intel_ddi_post_disable_dp(struct intel_encoder *encoder,
struct intel_dp *intel_dp = &dig_port->dp;
bool is_mst = intel_crtc_has_type(old_crtc_state,
INTEL_OUTPUT_DP_MST);
+ enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
if (!is_mst) {
intel_ddi_disable_pipe_clock(old_crtc_state);
@@ -3305,8 +3367,10 @@ static void intel_ddi_post_disable_dp(struct intel_encoder *encoder,
intel_edp_panel_vdd_on(intel_dp);
intel_edp_panel_off(intel_dp);
- intel_display_power_put_unchecked(dev_priv,
- dig_port->ddi_io_power_domain);
+ if (!intel_phy_is_tc(dev_priv, phy) ||
+ dig_port->tc_mode != TC_PORT_TBT_ALT)
+ intel_display_power_put_unchecked(dev_priv,
+ dig_port->ddi_io_power_domain);
intel_ddi_clk_disable(encoder);
}
@@ -3511,7 +3575,8 @@ static void intel_enable_ddi(struct intel_encoder *encoder,
/* Enable hdcp if it's desired */
if (conn_state->content_protection ==
DRM_MODE_CONTENT_PROTECTION_DESIRED)
- intel_hdcp_enable(to_intel_connector(conn_state->connector));
+ intel_hdcp_enable(to_intel_connector(conn_state->connector),
+ (u8)conn_state->hdcp_content_type);
}
static void intel_disable_ddi_dp(struct intel_encoder *encoder,
@@ -3580,44 +3645,65 @@ static void intel_ddi_update_pipe(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
+ struct intel_connector *connector =
+ to_intel_connector(conn_state->connector);
+ struct intel_hdcp *hdcp = &connector->hdcp;
+ bool content_protection_type_changed =
+ (conn_state->hdcp_content_type != hdcp->content_type &&
+ conn_state->content_protection !=
+ DRM_MODE_CONTENT_PROTECTION_UNDESIRED);
+
if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
intel_ddi_update_pipe_dp(encoder, crtc_state, conn_state);
+ /*
+ * If a content protection type change is requested during the HDCP
+ * encryption session, disable HDCP and re-enable it with the new
+ * type value.
+ */
if (conn_state->content_protection ==
- DRM_MODE_CONTENT_PROTECTION_DESIRED)
- intel_hdcp_enable(to_intel_connector(conn_state->connector));
- else if (conn_state->content_protection ==
- DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
- intel_hdcp_disable(to_intel_connector(conn_state->connector));
+ DRM_MODE_CONTENT_PROTECTION_UNDESIRED ||
+ content_protection_type_changed)
+ intel_hdcp_disable(connector);
+
+ /*
+ * Mark the HDCP state as DESIRED after HDCP has been disabled as part
+ * of the type change procedure.
+ */
+ if (content_protection_type_changed) {
+ mutex_lock(&hdcp->mutex);
+ hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
+ schedule_work(&hdcp->prop_work);
+ mutex_unlock(&hdcp->mutex);
+ }
+
+ if (conn_state->content_protection ==
+ DRM_MODE_CONTENT_PROTECTION_DESIRED ||
+ content_protection_type_changed)
+ intel_hdcp_enable(connector, (u8)conn_state->hdcp_content_type);
}
-static void intel_ddi_set_fia_lane_count(struct intel_encoder *encoder,
- const struct intel_crtc_state *pipe_config,
- enum port port)
+static void
+intel_ddi_update_prepare(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
+ struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
- enum tc_port tc_port = intel_port_to_tc(dev_priv, port);
- u32 val = I915_READ(PORT_TX_DFLEXDPMLE1);
- bool lane_reversal = dig_port->saved_port_bits & DDI_BUF_PORT_REVERSAL;
-
- val &= ~DFLEXDPMLE1_DPMLETC_MASK(tc_port);
- switch (pipe_config->lane_count) {
- case 1:
- val |= (lane_reversal) ? DFLEXDPMLE1_DPMLETC_ML3(tc_port) :
- DFLEXDPMLE1_DPMLETC_ML0(tc_port);
- break;
- case 2:
- val |= (lane_reversal) ? DFLEXDPMLE1_DPMLETC_ML3_2(tc_port) :
- DFLEXDPMLE1_DPMLETC_ML1_0(tc_port);
- break;
- case 4:
- val |= DFLEXDPMLE1_DPMLETC_ML3_0(tc_port);
- break;
- default:
- MISSING_CASE(pipe_config->lane_count);
- }
- I915_WRITE(PORT_TX_DFLEXDPMLE1, val);
+ struct intel_crtc_state *crtc_state =
+ crtc ? intel_atomic_get_new_crtc_state(state, crtc) : NULL;
+ int required_lanes = crtc_state ? crtc_state->lane_count : 1;
+
+ WARN_ON(crtc && crtc->active);
+
+ intel_tc_port_get_link(enc_to_dig_port(&encoder->base), required_lanes);
+ if (crtc_state && crtc_state->base.active)
+ intel_update_active_dpll(state, crtc, encoder);
+}
+
+static void
+intel_ddi_update_complete(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
+ struct intel_crtc *crtc)
+{
+ intel_tc_port_put_link(enc_to_dig_port(&encoder->base));
}
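
/*
 * A minimal sketch of how the two hooks above pair up around an encoder
 * update: update_prepare() grabs the Type-C link (and, for an active
 * crtc, refreshes the DPLL selection) before the update runs, and
 * update_complete() releases the link afterwards. The wrapper below is
 * purely illustrative; only the two hooks themselves come from the
 * patch.
 */
static void example_encoder_update(struct intel_atomic_state *state,
				   struct intel_encoder *encoder,
				   struct intel_crtc *crtc)
{
	intel_ddi_update_prepare(state, encoder, crtc);	/* TC link get */
	/* ... the actual encoder/pipe update happens here ... */
	intel_ddi_update_complete(state, encoder, crtc); /* TC link put */
}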
static void
@@ -3627,26 +3713,25 @@ intel_ddi_pre_pll_enable(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
- enum port port = encoder->port;
+ enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
+ bool is_tc_port = intel_phy_is_tc(dev_priv, phy);
- if (intel_crtc_has_dp_encoder(crtc_state) ||
- intel_port_is_tc(dev_priv, encoder->port))
+ if (is_tc_port)
+ intel_tc_port_get_link(dig_port, crtc_state->lane_count);
+
+ if (intel_crtc_has_dp_encoder(crtc_state) || is_tc_port)
intel_display_power_get(dev_priv,
intel_ddi_main_link_aux_domain(dig_port));
- if (IS_GEN9_LP(dev_priv))
+ if (is_tc_port && dig_port->tc_mode != TC_PORT_TBT_ALT)
+ /*
+ * Program the lane count for static/dynamic connections on
+ * Type-C ports. Skip this step for TBT.
+ */
+ intel_tc_port_set_fia_lane_count(dig_port, crtc_state->lane_count);
+ else if (IS_GEN9_LP(dev_priv))
bxt_ddi_phy_set_lane_optim_mask(encoder,
crtc_state->lane_lat_optim_mask);
-
- /*
- * Program the lane count for static/dynamic connections on Type-C ports.
- * Skip this step for TBT.
- */
- if (dig_port->tc_type == TC_PORT_UNKNOWN ||
- dig_port->tc_type == TC_PORT_TBT)
- return;
-
- intel_ddi_set_fia_lane_count(encoder, crtc_state, port);
}
static void
@@ -3656,11 +3741,15 @@ intel_ddi_post_pll_disable(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
+ enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
+ bool is_tc_port = intel_phy_is_tc(dev_priv, phy);
- if (intel_crtc_has_dp_encoder(crtc_state) ||
- intel_port_is_tc(dev_priv, encoder->port))
+ if (intel_crtc_has_dp_encoder(crtc_state) || is_tc_port)
intel_display_power_put_unchecked(dev_priv,
intel_ddi_main_link_aux_domain(dig_port));
+
+ if (is_tc_port)
+ intel_tc_port_put_link(dig_port);
}
static void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp)
@@ -3737,7 +3826,6 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
- struct intel_digital_port *intel_dig_port;
u32 temp, flags = 0;
/* XXX: DSI transcoder paranoia */
@@ -3776,7 +3864,6 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
switch (temp & TRANS_DDI_MODE_SELECT_MASK) {
case TRANS_DDI_MODE_SELECT_HDMI:
pipe_config->has_hdmi_sink = true;
- intel_dig_port = enc_to_dig_port(&encoder->base);
pipe_config->infoframes.enable |=
intel_hdmi_infoframes_enabled(encoder, pipe_config);
@@ -3914,49 +4001,18 @@ static int intel_ddi_compute_config(struct intel_encoder *encoder,
return 0;
}
-static void intel_ddi_encoder_suspend(struct intel_encoder *encoder)
-{
- struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
-
- intel_dp_encoder_suspend(encoder);
-
- /*
- * TODO: disconnect also from USB DP alternate mode once we have a
- * way to handle the modeset restore in that mode during resume
- * even if the sink has disappeared while being suspended.
- */
- if (dig_port->tc_legacy_port)
- icl_tc_phy_disconnect(i915, dig_port);
-}
-
-static void intel_ddi_encoder_reset(struct drm_encoder *drm_encoder)
-{
- struct intel_digital_port *dig_port = enc_to_dig_port(drm_encoder);
- struct drm_i915_private *i915 = to_i915(drm_encoder->dev);
-
- if (intel_port_is_tc(i915, dig_port->base.port))
- intel_digital_port_connected(&dig_port->base);
-
- intel_dp_encoder_reset(drm_encoder);
-}
-
static void intel_ddi_encoder_destroy(struct drm_encoder *encoder)
{
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
- struct drm_i915_private *i915 = to_i915(encoder->dev);
intel_dp_encoder_flush_work(encoder);
- if (intel_port_is_tc(i915, dig_port->base.port))
- icl_tc_phy_disconnect(i915, dig_port);
-
drm_encoder_cleanup(encoder);
kfree(dig_port);
}
static const struct drm_encoder_funcs intel_ddi_funcs = {
- .reset = intel_ddi_encoder_reset,
+ .reset = intel_dp_encoder_reset,
.destroy = intel_ddi_encoder_destroy,
};
@@ -4081,14 +4137,17 @@ static int intel_hdmi_reset_link(struct intel_encoder *encoder,
return modeset_pipe(&crtc->base, ctx);
}
-static bool intel_ddi_hotplug(struct intel_encoder *encoder,
- struct intel_connector *connector)
+static enum intel_hotplug_state
+intel_ddi_hotplug(struct intel_encoder *encoder,
+ struct intel_connector *connector,
+ bool irq_received)
{
+ struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
struct drm_modeset_acquire_ctx ctx;
- bool changed;
+ enum intel_hotplug_state state;
int ret;
- changed = intel_encoder_hotplug(encoder, connector);
+ state = intel_encoder_hotplug(encoder, connector, irq_received);
drm_modeset_acquire_init(&ctx, 0);
@@ -4110,7 +4169,27 @@ static bool intel_ddi_hotplug(struct intel_encoder *encoder,
drm_modeset_acquire_fini(&ctx);
WARN(ret, "Acquiring modeset locks failed with %i\n", ret);
- return changed;
+ /*
+ * Unpowered type-c dongles can take some time to boot and become
+ * responsive, so give them some time to power up and then retry
+ * the probe.
+ *
+ * On many platforms the HDMI live state signal is known to be
+ * unreliable, so we can't use it to detect if a sink is connected or
+ * not. Instead we detect if it's connected based on whether we can
+ * read the EDID or not. That in turn has a problem during disconnect,
+ * since the HPD interrupt may be raised before the DDC lines get
+ * disconnected (due to how the required length of DDC vs. HPD
+ * connector pins are specified) and so we'll still be able to get a
+ * valid EDID. To solve this schedule another detection cycle if this
+ * time around we didn't detect any change in the sink's connection
+ * status.
+ */
+ if (state == INTEL_HOTPLUG_UNCHANGED && irq_received &&
+ !dig_port->dp.is_mst)
+ state = INTEL_HOTPLUG_RETRY;
+
+ return state;
}
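
/*
 * A sketch of how a caller might act on the tri-state result above; the
 * worker and delay are illustrative assumptions, while the enum values
 * are the ones returned by intel_ddi_hotplug().
 */
static void example_handle_hotplug(struct intel_encoder *encoder,
				   struct intel_connector *connector,
				   bool irq_received,
				   struct delayed_work *retry_work)
{
	switch (intel_ddi_hotplug(encoder, connector, irq_received)) {
	case INTEL_HOTPLUG_RETRY:
		/* give slow type-c dongles time to boot, then re-probe */
		schedule_delayed_work(retry_work, msecs_to_jiffies(1000));
		break;
	case INTEL_HOTPLUG_CHANGED:
	case INTEL_HOTPLUG_UNCHANGED:
		break;
	}
}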
static struct intel_connector *
@@ -4198,6 +4277,7 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
struct drm_encoder *encoder;
bool init_hdmi, init_dp, init_lspcon = false;
enum pipe pipe;
+ enum phy phy = intel_port_to_phy(dev_priv, port);
init_hdmi = port_info->supports_dvi || port_info->supports_hdmi;
init_dp = port_info->supports_dp;
@@ -4242,7 +4322,7 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
intel_encoder->update_pipe = intel_ddi_update_pipe;
intel_encoder->get_hw_state = intel_ddi_get_hw_state;
intel_encoder->get_config = intel_ddi_get_config;
- intel_encoder->suspend = intel_ddi_encoder_suspend;
+ intel_encoder->suspend = intel_dp_encoder_suspend;
intel_encoder->get_power_domains = intel_ddi_get_power_domains;
intel_encoder->type = INTEL_OUTPUT_DDI;
intel_encoder->power_domain = intel_port_to_power_domain(port);
@@ -4261,9 +4341,15 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
intel_dig_port->max_lanes = intel_ddi_max_lanes(intel_dig_port);
intel_dig_port->aux_ch = intel_bios_port_aux_ch(dev_priv, port);
- intel_dig_port->tc_legacy_port = intel_port_is_tc(dev_priv, port) &&
- !port_info->supports_typec_usb &&
- !port_info->supports_tbt;
+ if (intel_phy_is_tc(dev_priv, phy)) {
+ bool is_legacy = !port_info->supports_typec_usb &&
+ !port_info->supports_tbt;
+
+ intel_tc_port_init(intel_dig_port, is_legacy);
+
+ intel_encoder->update_prepare = intel_ddi_update_prepare;
+ intel_encoder->update_complete = intel_ddi_update_complete;
+ }
switch (port) {
case PORT_A:
@@ -4290,6 +4376,18 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
intel_dig_port->ddi_io_power_domain =
POWER_DOMAIN_PORT_DDI_F_IO;
break;
+ case PORT_G:
+ intel_dig_port->ddi_io_power_domain =
+ POWER_DOMAIN_PORT_DDI_G_IO;
+ break;
+ case PORT_H:
+ intel_dig_port->ddi_io_power_domain =
+ POWER_DOMAIN_PORT_DDI_H_IO;
+ break;
+ case PORT_I:
+ intel_dig_port->ddi_io_power_domain =
+ POWER_DOMAIN_PORT_DDI_I_IO;
+ break;
default:
MISSING_CASE(port);
}
@@ -4324,9 +4422,6 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
intel_infoframe_init(intel_dig_port);
- if (intel_port_is_tc(dev_priv, port))
- intel_digital_port_connected(intel_encoder);
-
return;
err:
diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
index 592b92782fab..b51d1ceb8739 100644
--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -29,7 +29,7 @@
#include <linux/intel-iommu.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/reservation.h>
+#include <linux/dma-resv.h>
#include <linux/slab.h>
#include <linux/vgaarb.h>
@@ -62,9 +62,9 @@
#include "intel_atomic.h"
#include "intel_atomic_plane.h"
#include "intel_bw.h"
-#include "intel_color.h"
#include "intel_cdclk.h"
-#include "intel_drv.h"
+#include "intel_color.h"
+#include "intel_display_types.h"
#include "intel_fbc.h"
#include "intel_fbdev.h"
#include "intel_fifo_underrun.h"
@@ -78,6 +78,7 @@
#include "intel_quirks.h"
#include "intel_sideband.h"
#include "intel_sprite.h"
+#include "intel_tc.h"
/* Primary plane formats for gen <= 3 */
static const u32 i8xx_primary_formats[] = {
@@ -515,9 +516,9 @@ icl_wa_scalerclkgating(struct drm_i915_private *dev_priv, enum pipe pipe,
}
static bool
-needs_modeset(const struct drm_crtc_state *state)
+needs_modeset(const struct intel_crtc_state *state)
{
- return drm_atomic_crtc_needs_modeset(state);
+ return drm_atomic_crtc_needs_modeset(&state->base);
}
/*
@@ -1076,9 +1077,8 @@ intel_wait_for_pipe_off(const struct intel_crtc_state *old_crtc_state)
i915_reg_t reg = PIPECONF(cpu_transcoder);
/* Wait for the Pipe State to go off */
- if (intel_wait_for_register(&dev_priv->uncore,
- reg, I965_PIPECONF_ACTIVE, 0,
- 100))
+ if (intel_de_wait_for_clear(dev_priv, reg,
+ I965_PIPECONF_ACTIVE, 100))
WARN(1, "pipe_off wait timed out\n");
} else {
intel_wait_for_pipe_scanline_stopped(crtc);
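
/*
 * The intel_de_wait_for_set()/intel_de_wait_for_clear() helpers adopted
 * throughout these hunks wrap the older intel_wait_for_register()
 * pattern; a plausible sketch of their semantics (the real definitions
 * live elsewhere in the driver):
 */
static inline int sketch_de_wait_for_set(struct drm_i915_private *i915,
					 i915_reg_t reg, u32 mask,
					 unsigned int timeout_ms)
{
	/* success once every bit in @mask reads back as set */
	return intel_wait_for_register(&i915->uncore, reg, mask, mask,
				       timeout_ms);
}

static inline int sketch_de_wait_for_clear(struct drm_i915_private *i915,
					   i915_reg_t reg, u32 mask,
					   unsigned int timeout_ms)
{
	/* success once every bit in @mask reads back as zero */
	return intel_wait_for_register(&i915->uncore, reg, mask, 0,
				       timeout_ms);
}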
@@ -1382,11 +1382,7 @@ static void _vlv_enable_pll(struct intel_crtc *crtc,
POSTING_READ(DPLL(pipe));
udelay(150);
- if (intel_wait_for_register(&dev_priv->uncore,
- DPLL(pipe),
- DPLL_LOCK_VLV,
- DPLL_LOCK_VLV,
- 1))
+ if (intel_de_wait_for_set(dev_priv, DPLL(pipe), DPLL_LOCK_VLV, 1))
DRM_ERROR("DPLL %d failed to lock\n", pipe);
}
@@ -1435,9 +1431,7 @@ static void _chv_enable_pll(struct intel_crtc *crtc,
I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll);
/* Check PLL is locked */
- if (intel_wait_for_register(&dev_priv->uncore,
- DPLL(pipe), DPLL_LOCK_VLV, DPLL_LOCK_VLV,
- 1))
+ if (intel_de_wait_for_set(dev_priv, DPLL(pipe), DPLL_LOCK_VLV, 1))
DRM_ERROR("PLL %d failed to lock\n", pipe);
}
@@ -1616,9 +1610,8 @@ void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
BUG();
}
- if (intel_wait_for_register(&dev_priv->uncore,
- dpll_reg, port_mask, expected_mask,
- 1000))
+ if (intel_de_wait_for_register(dev_priv, dpll_reg,
+ port_mask, expected_mask, 1000))
WARN(1, "timed out waiting for port %c ready: got 0x%x, expected 0x%x\n",
port_name(dport->base.port),
I915_READ(dpll_reg) & port_mask, expected_mask);
@@ -1677,9 +1670,7 @@ static void ironlake_enable_pch_transcoder(const struct intel_crtc_state *crtc_s
}
I915_WRITE(reg, val | TRANS_ENABLE);
- if (intel_wait_for_register(&dev_priv->uncore,
- reg, TRANS_STATE_ENABLE, TRANS_STATE_ENABLE,
- 100))
+ if (intel_de_wait_for_set(dev_priv, reg, TRANS_STATE_ENABLE, 100))
DRM_ERROR("failed to enable transcoder %c\n", pipe_name(pipe));
}
@@ -1707,11 +1698,8 @@ static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
val |= TRANS_PROGRESSIVE;
I915_WRITE(LPT_TRANSCONF, val);
- if (intel_wait_for_register(&dev_priv->uncore,
- LPT_TRANSCONF,
- TRANS_STATE_ENABLE,
- TRANS_STATE_ENABLE,
- 100))
+ if (intel_de_wait_for_set(dev_priv, LPT_TRANSCONF,
+ TRANS_STATE_ENABLE, 100))
DRM_ERROR("Failed to enable PCH transcoder\n");
}
@@ -1733,9 +1721,7 @@ static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv,
val &= ~TRANS_ENABLE;
I915_WRITE(reg, val);
/* wait for PCH transcoder off, transcoder state */
- if (intel_wait_for_register(&dev_priv->uncore,
- reg, TRANS_STATE_ENABLE, 0,
- 50))
+ if (intel_de_wait_for_clear(dev_priv, reg, TRANS_STATE_ENABLE, 50))
DRM_ERROR("failed to disable transcoder %c\n", pipe_name(pipe));
if (HAS_PCH_CPT(dev_priv)) {
@@ -1755,9 +1741,8 @@ void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
val &= ~TRANS_ENABLE;
I915_WRITE(LPT_TRANSCONF, val);
/* wait for PCH transcoder off, transcoder state */
- if (intel_wait_for_register(&dev_priv->uncore,
- LPT_TRANSCONF, TRANS_STATE_ENABLE, 0,
- 50))
+ if (intel_de_wait_for_clear(dev_priv, LPT_TRANSCONF,
+ TRANS_STATE_ENABLE, 50))
DRM_ERROR("Failed to disable PCH transcoder\n");
/* Workaround: clear timing override bit. */
@@ -3048,12 +3033,13 @@ intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- struct drm_i915_gem_object *obj = NULL;
struct drm_mode_fb_cmd2 mode_cmd = { 0 };
struct drm_framebuffer *fb = &plane_config->fb->base;
u32 base_aligned = round_down(plane_config->base, PAGE_SIZE);
u32 size_aligned = round_up(plane_config->base + plane_config->size,
PAGE_SIZE);
+ struct drm_i915_gem_object *obj;
+ bool ret = false;
size_aligned -= base_aligned;
@@ -3095,7 +3081,7 @@ intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
break;
default:
MISSING_CASE(plane_config->tiling);
- return false;
+ goto out;
}
mode_cmd.pixel_format = fb->format->format;
@@ -3107,16 +3093,15 @@ intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
if (intel_framebuffer_init(to_intel_framebuffer(fb), obj, &mode_cmd)) {
DRM_DEBUG_KMS("intel fb init failed\n");
- goto out_unref_obj;
+ goto out;
}
DRM_DEBUG_KMS("initial plane fb obj %p\n", obj);
- return true;
-
-out_unref_obj:
+ ret = true;
+out:
i915_gem_object_put(obj);
- return false;
+ return ret;
}
static void
@@ -3173,6 +3158,12 @@ static void intel_plane_disable_noatomic(struct intel_crtc *crtc,
intel_disable_plane(plane, crtc_state);
}
+static struct intel_frontbuffer *
+to_intel_frontbuffer(struct drm_framebuffer *fb)
+{
+ return fb ? to_intel_framebuffer(fb)->frontbuffer : NULL;
+}
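
/*
 * The accessor above returns NULL for a NULL framebuffer, letting
 * callers flush unconditionally; a minimal usage sketch, assuming the
 * flush helper treats a NULL frontbuffer as a no-op:
 */
static void example_flush_dirty_fb(struct drm_framebuffer *fb)
{
	intel_frontbuffer_flush(to_intel_frontbuffer(fb), ORIGIN_DIRTYFB);
}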
+
static void
intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
struct intel_initial_plane_config *plane_config)
@@ -3180,7 +3171,6 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
struct drm_device *dev = intel_crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_crtc *c;
- struct drm_i915_gem_object *obj;
struct drm_plane *primary = intel_crtc->base.primary;
struct drm_plane_state *plane_state = primary->state;
struct intel_plane *intel_plane = to_intel_plane(primary);
@@ -3256,8 +3246,7 @@ valid_fb:
return;
}
- obj = intel_fb_obj(fb);
- intel_fb_obj_flush(obj, ORIGIN_DIRTYFB);
+ intel_frontbuffer_flush(to_intel_frontbuffer(fb), ORIGIN_DIRTYFB);
plane_state->src_x = 0;
plane_state->src_y = 0;
@@ -3272,14 +3261,14 @@ valid_fb:
intel_state->base.src = drm_plane_state_src(plane_state);
intel_state->base.dst = drm_plane_state_dest(plane_state);
- if (i915_gem_object_is_tiled(obj))
+ if (plane_config->tiling)
dev_priv->preserve_bios_swizzle = true;
plane_state->fb = fb;
plane_state->crtc = &intel_crtc->base;
atomic_or(to_intel_plane(primary)->frontbuffer_bit,
- &obj->frontbuffer_bits);
+ &to_intel_frontbuffer(fb)->bits);
}
static int skl_max_plane_width(const struct drm_framebuffer *fb,
@@ -3715,10 +3704,27 @@ int i9xx_check_plane_surface(struct intel_plane_state *plane_state)
return 0;
}
+static bool i9xx_plane_has_windowing(struct intel_plane *plane)
+{
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
+
+ if (IS_CHERRYVIEW(dev_priv))
+ return i9xx_plane == PLANE_B;
+ else if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
+ return false;
+ else if (IS_GEN(dev_priv, 4))
+ return i9xx_plane == PLANE_C;
+ else
+ return i9xx_plane == PLANE_B ||
+ i9xx_plane == PLANE_C;
+}
+
static int
i9xx_plane_check(struct intel_crtc_state *crtc_state,
struct intel_plane_state *plane_state)
{
+ struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
int ret;
ret = chv_plane_check_rotation(plane_state);
@@ -3729,7 +3735,8 @@ i9xx_plane_check(struct intel_crtc_state *crtc_state,
&crtc_state->base,
DRM_PLANE_HELPER_NO_SCALING,
DRM_PLANE_HELPER_NO_SCALING,
- false, true);
+ i9xx_plane_has_windowing(plane),
+ true);
if (ret)
return ret;
@@ -3758,6 +3765,10 @@ static void i9xx_update_plane(struct intel_plane *plane,
u32 linear_offset;
int x = plane_state->color_plane[0].x;
int y = plane_state->color_plane[0].y;
+ int crtc_x = plane_state->base.dst.x1;
+ int crtc_y = plane_state->base.dst.y1;
+ int crtc_w = drm_rect_width(&plane_state->base.dst);
+ int crtc_h = drm_rect_height(&plane_state->base.dst);
unsigned long irqflags;
u32 dspaddr_offset;
u32 dspcntr;
@@ -3776,18 +3787,18 @@ static void i9xx_update_plane(struct intel_plane *plane,
I915_WRITE_FW(DSPSTRIDE(i9xx_plane), plane_state->color_plane[0].stride);
if (INTEL_GEN(dev_priv) < 4) {
- /* pipesrc and dspsize control the size that is scaled from,
- * which should always be the user's requested size.
+ /*
+ * PLANE_A doesn't actually have a full window
+ * generator but let's assume we still need to
+ * program whatever is there.
*/
- I915_WRITE_FW(DSPPOS(i9xx_plane), 0);
+ I915_WRITE_FW(DSPPOS(i9xx_plane), (crtc_y << 16) | crtc_x);
I915_WRITE_FW(DSPSIZE(i9xx_plane),
- ((crtc_state->pipe_src_h - 1) << 16) |
- (crtc_state->pipe_src_w - 1));
+ ((crtc_h - 1) << 16) | (crtc_w - 1));
} else if (IS_CHERRYVIEW(dev_priv) && i9xx_plane == PLANE_B) {
- I915_WRITE_FW(PRIMPOS(i9xx_plane), 0);
+ I915_WRITE_FW(PRIMPOS(i9xx_plane), (crtc_y << 16) | crtc_x);
I915_WRITE_FW(PRIMSIZE(i9xx_plane),
- ((crtc_state->pipe_src_h - 1) << 16) |
- (crtc_state->pipe_src_w - 1));
+ ((crtc_h - 1) << 16) | (crtc_w - 1));
I915_WRITE_FW(PRIMCNSTALPHA(i9xx_plane), 0);
}
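
/*
 * A worked example of the packing above: position registers take
 * (y << 16) | x and size registers take ((h - 1) << 16) | (w - 1), so a
 * 720x480 plane window at (8, 4) programs DSPPOS = 0x00040008 and
 * DSPSIZE = 0x01df02cf.
 */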
@@ -3950,10 +3961,10 @@ static u32 skl_plane_ctl_format(u32 pixel_format)
case DRM_FORMAT_XRGB8888:
case DRM_FORMAT_ARGB8888:
return PLANE_CTL_FORMAT_XRGB_8888;
+ case DRM_FORMAT_XBGR2101010:
+ return PLANE_CTL_FORMAT_XRGB_2101010 | PLANE_CTL_ORDER_RGBX;
case DRM_FORMAT_XRGB2101010:
return PLANE_CTL_FORMAT_XRGB_2101010;
- case DRM_FORMAT_XBGR2101010:
- return PLANE_CTL_ORDER_RGBX | PLANE_CTL_FORMAT_XRGB_2101010;
case DRM_FORMAT_XBGR16161616F:
case DRM_FORMAT_ABGR16161616F:
return PLANE_CTL_FORMAT_XRGB_16161616F | PLANE_CTL_ORDER_RGBX;
@@ -4248,12 +4259,13 @@ void intel_prepare_reset(struct drm_i915_private *dev_priv)
return;
/* We have a modeset vs reset deadlock, defensively unbreak it. */
- set_bit(I915_RESET_MODESET, &dev_priv->gpu_error.flags);
- wake_up_all(&dev_priv->gpu_error.wait_queue);
+ set_bit(I915_RESET_MODESET, &dev_priv->gt.reset.flags);
+ smp_mb__after_atomic();
+ wake_up_bit(&dev_priv->gt.reset.flags, I915_RESET_MODESET);
if (atomic_read(&dev_priv->gpu_error.pending_fb_pin)) {
DRM_DEBUG_KMS("Modeset potentially stuck, unbreaking through wedging\n");
- i915_gem_set_wedged(dev_priv);
+ intel_gt_set_wedged(&dev_priv->gt);
}
/*
@@ -4299,7 +4311,7 @@ void intel_finish_reset(struct drm_i915_private *dev_priv)
int ret;
/* reset doesn't touch the display */
- if (!test_bit(I915_RESET_MODESET, &dev_priv->gpu_error.flags))
+ if (!test_bit(I915_RESET_MODESET, &dev_priv->gt.reset.flags))
return;
state = fetch_and_zero(&dev_priv->modeset_restore_state);
@@ -4339,7 +4351,7 @@ unlock:
drm_modeset_acquire_fini(ctx);
mutex_unlock(&dev->mode_config.mutex);
- clear_bit(I915_RESET_MODESET, &dev_priv->gpu_error.flags);
+ clear_bit_unlock(I915_RESET_MODESET, &dev_priv->gt.reset.flags);
}
static void icl_set_pipe_chicken(struct intel_crtc *crtc)
@@ -5669,9 +5681,7 @@ void hsw_enable_ips(const struct intel_crtc_state *crtc_state)
* and don't wait for vblanks until the end of crtc_enable, then
* the HW state readout code will complain that the expected
* IPS_CTL value is not the one we read. */
- if (intel_wait_for_register(&dev_priv->uncore,
- IPS_CTL, IPS_ENABLE, IPS_ENABLE,
- 50))
+ if (intel_de_wait_for_set(dev_priv, IPS_CTL, IPS_ENABLE, 50))
DRM_ERROR("Timed out waiting for IPS enable\n");
}
}
@@ -5692,9 +5702,7 @@ void hsw_disable_ips(const struct intel_crtc_state *crtc_state)
* 42ms timeout value leads to occasional timeouts so use 100ms
* instead.
*/
- if (intel_wait_for_register(&dev_priv->uncore,
- IPS_CTL, IPS_ENABLE, 0,
- 100))
+ if (intel_de_wait_for_clear(dev_priv, IPS_CTL, IPS_ENABLE, 100))
DRM_ERROR("Timed out waiting for IPS disable\n");
} else {
I915_WRITE(IPS_CTL, 0);
@@ -5796,7 +5804,7 @@ static bool hsw_pre_update_disable_ips(const struct intel_crtc_state *old_crtc_s
if (!old_crtc_state->ips_enabled)
return false;
- if (needs_modeset(&new_crtc_state->base))
+ if (needs_modeset(new_crtc_state))
return true;
/*
@@ -5823,7 +5831,7 @@ static bool hsw_post_update_enable_ips(const struct intel_crtc_state *old_crtc_s
if (!new_crtc_state->ips_enabled)
return false;
- if (needs_modeset(&new_crtc_state->base))
+ if (needs_modeset(new_crtc_state))
return true;
/*
@@ -5877,13 +5885,13 @@ static void intel_post_plane_update(struct intel_crtc_state *old_crtc_state)
struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- struct drm_atomic_state *old_state = old_crtc_state->base.state;
+ struct drm_atomic_state *state = old_crtc_state->base.state;
struct intel_crtc_state *pipe_config =
- intel_atomic_get_new_crtc_state(to_intel_atomic_state(old_state),
+ intel_atomic_get_new_crtc_state(to_intel_atomic_state(state),
crtc);
struct drm_plane *primary = crtc->base.primary;
struct drm_plane_state *old_primary_state =
- drm_atomic_get_old_plane_state(old_state, primary);
+ drm_atomic_get_old_plane_state(state, primary);
intel_frontbuffer_flip(to_i915(crtc->base.dev), pipe_config->fb_bits);
@@ -5895,12 +5903,12 @@ static void intel_post_plane_update(struct intel_crtc_state *old_crtc_state)
if (old_primary_state) {
struct drm_plane_state *new_primary_state =
- drm_atomic_get_new_plane_state(old_state, primary);
+ drm_atomic_get_new_plane_state(state, primary);
intel_fbc_post_update(crtc);
if (new_primary_state->visible &&
- (needs_modeset(&pipe_config->base) ||
+ (needs_modeset(pipe_config) ||
!old_primary_state->visible))
intel_post_enable_primary(&crtc->base, pipe_config);
}
@@ -5920,20 +5928,20 @@ static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state,
struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- struct drm_atomic_state *old_state = old_crtc_state->base.state;
+ struct drm_atomic_state *state = old_crtc_state->base.state;
struct drm_plane *primary = crtc->base.primary;
struct drm_plane_state *old_primary_state =
- drm_atomic_get_old_plane_state(old_state, primary);
- bool modeset = needs_modeset(&pipe_config->base);
- struct intel_atomic_state *old_intel_state =
- to_intel_atomic_state(old_state);
+ drm_atomic_get_old_plane_state(state, primary);
+ bool modeset = needs_modeset(pipe_config);
+ struct intel_atomic_state *intel_state =
+ to_intel_atomic_state(state);
if (hsw_pre_update_disable_ips(old_crtc_state, pipe_config))
hsw_disable_ips(old_crtc_state);
if (old_primary_state) {
struct intel_plane_state *new_primary_state =
- intel_atomic_get_new_plane_state(old_intel_state,
+ intel_atomic_get_new_plane_state(intel_state,
to_intel_plane(primary));
intel_fbc_pre_update(crtc, pipe_config, new_primary_state);
@@ -5984,7 +5992,7 @@ static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state,
* If we're doing a modeset, we're done. No need to do any pre-vblank
* watermark programming here.
*/
- if (needs_modeset(&pipe_config->base))
+ if (needs_modeset(pipe_config))
return;
/*
@@ -6002,7 +6010,7 @@ static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state,
* us to.
*/
if (dev_priv->display.initial_watermarks != NULL)
- dev_priv->display.initial_watermarks(old_intel_state,
+ dev_priv->display.initial_watermarks(intel_state,
pipe_config);
else if (pipe_config->update_wm_pre)
intel_update_watermarks(crtc);
@@ -6036,19 +6044,111 @@ static void intel_crtc_disable_planes(struct intel_atomic_state *state,
intel_frontbuffer_flip(dev_priv, fb_bits);
}
-static void intel_encoders_pre_pll_enable(struct drm_crtc *crtc,
+/*
+ * intel_connector_primary_encoder - get the primary encoder for a connector
+ * @connector: connector for which to return the encoder
+ *
+ * Returns the primary encoder for a connector. There is a 1:1 mapping from
+ * each connector to its encoder, except for DP-MST connectors, which have
+ * both a virtual and a primary encoder. These DP-MST primary encoders can be
+ * pointed to by as many DP-MST connectors as there are pipes.
+ */
+static struct intel_encoder *
+intel_connector_primary_encoder(struct intel_connector *connector)
+{
+ struct intel_encoder *encoder;
+
+ if (connector->mst_port)
+ return &dp_to_dig_port(connector->mst_port)->base;
+
+ encoder = intel_attached_encoder(&connector->base);
+ WARN_ON(!encoder);
+
+ return encoder;
+}
+
+static bool
+intel_connector_needs_modeset(struct intel_atomic_state *state,
+ const struct drm_connector_state *old_conn_state,
+ const struct drm_connector_state *new_conn_state)
+{
+ struct intel_crtc *old_crtc = old_conn_state->crtc ?
+ to_intel_crtc(old_conn_state->crtc) : NULL;
+ struct intel_crtc *new_crtc = new_conn_state->crtc ?
+ to_intel_crtc(new_conn_state->crtc) : NULL;
+
+ return new_crtc != old_crtc ||
+ (new_crtc &&
+ needs_modeset(intel_atomic_get_new_crtc_state(state, new_crtc)));
+}
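
/*
 * In words: the connector needs the encoder update hooks when it moves
 * between crtcs (including attach/detach) or when its crtc undergoes a
 * full modeset. Illustrative cases, with crtc_a != crtc_b:
 *
 *   old crtc  new crtc  needs_modeset(new crtc)  result
 *   NULL      crtc_a    (any)                    true   attach
 *   crtc_a    NULL      (any)                    true   detach
 *   crtc_a    crtc_b    (any)                    true   moved
 *   crtc_a    crtc_a    true                     true   full modeset
 *   crtc_a    crtc_a    false                    false  fastset
 */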
+
+static void intel_encoders_update_prepare(struct intel_atomic_state *state)
+{
+ struct drm_connector_state *old_conn_state;
+ struct drm_connector_state *new_conn_state;
+ struct drm_connector *conn;
+ int i;
+
+ for_each_oldnew_connector_in_state(&state->base, conn,
+ old_conn_state, new_conn_state, i) {
+ struct intel_encoder *encoder;
+ struct intel_crtc *crtc;
+
+ if (!intel_connector_needs_modeset(state,
+ old_conn_state,
+ new_conn_state))
+ continue;
+
+ encoder = intel_connector_primary_encoder(to_intel_connector(conn));
+ if (!encoder->update_prepare)
+ continue;
+
+ crtc = new_conn_state->crtc ?
+ to_intel_crtc(new_conn_state->crtc) : NULL;
+ encoder->update_prepare(state, encoder, crtc);
+ }
+}
+
+static void intel_encoders_update_complete(struct intel_atomic_state *state)
+{
+ struct drm_connector_state *old_conn_state;
+ struct drm_connector_state *new_conn_state;
+ struct drm_connector *conn;
+ int i;
+
+ for_each_oldnew_connector_in_state(&state->base, conn,
+ old_conn_state, new_conn_state, i) {
+ struct intel_encoder *encoder;
+ struct intel_crtc *crtc;
+
+ if (!intel_connector_needs_modeset(state,
+ old_conn_state,
+ new_conn_state))
+ continue;
+
+ encoder = intel_connector_primary_encoder(to_intel_connector(conn));
+ if (!encoder->update_complete)
+ continue;
+
+ crtc = new_conn_state->crtc ?
+ to_intel_crtc(new_conn_state->crtc) : NULL;
+ encoder->update_complete(state, encoder, crtc);
+ }
+}
+
+static void intel_encoders_pre_pll_enable(struct intel_crtc *crtc,
struct intel_crtc_state *crtc_state,
- struct drm_atomic_state *old_state)
+ struct intel_atomic_state *state)
{
struct drm_connector_state *conn_state;
struct drm_connector *conn;
int i;
- for_each_new_connector_in_state(old_state, conn, conn_state, i) {
+ for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
struct intel_encoder *encoder =
to_intel_encoder(conn_state->best_encoder);
- if (conn_state->crtc != crtc)
+ if (conn_state->crtc != &crtc->base)
continue;
if (encoder->pre_pll_enable)
@@ -6056,19 +6156,19 @@ static void intel_encoders_pre_pll_enable(struct drm_crtc *crtc,
}
}
-static void intel_encoders_pre_enable(struct drm_crtc *crtc,
+static void intel_encoders_pre_enable(struct intel_crtc *crtc,
struct intel_crtc_state *crtc_state,
- struct drm_atomic_state *old_state)
+ struct intel_atomic_state *state)
{
struct drm_connector_state *conn_state;
struct drm_connector *conn;
int i;
- for_each_new_connector_in_state(old_state, conn, conn_state, i) {
+ for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
struct intel_encoder *encoder =
to_intel_encoder(conn_state->best_encoder);
- if (conn_state->crtc != crtc)
+ if (conn_state->crtc != &crtc->base)
continue;
if (encoder->pre_enable)
@@ -6076,19 +6176,19 @@ static void intel_encoders_pre_enable(struct drm_crtc *crtc,
}
}
-static void intel_encoders_enable(struct drm_crtc *crtc,
+static void intel_encoders_enable(struct intel_crtc *crtc,
struct intel_crtc_state *crtc_state,
- struct drm_atomic_state *old_state)
+ struct intel_atomic_state *state)
{
struct drm_connector_state *conn_state;
struct drm_connector *conn;
int i;
- for_each_new_connector_in_state(old_state, conn, conn_state, i) {
+ for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
struct intel_encoder *encoder =
to_intel_encoder(conn_state->best_encoder);
- if (conn_state->crtc != crtc)
+ if (conn_state->crtc != &crtc->base)
continue;
if (encoder->enable)
@@ -6097,19 +6197,19 @@ static void intel_encoders_enable(struct drm_crtc *crtc,
}
}
-static void intel_encoders_disable(struct drm_crtc *crtc,
+static void intel_encoders_disable(struct intel_crtc *crtc,
struct intel_crtc_state *old_crtc_state,
- struct drm_atomic_state *old_state)
+ struct intel_atomic_state *state)
{
struct drm_connector_state *old_conn_state;
struct drm_connector *conn;
int i;
- for_each_old_connector_in_state(old_state, conn, old_conn_state, i) {
+ for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
struct intel_encoder *encoder =
to_intel_encoder(old_conn_state->best_encoder);
- if (old_conn_state->crtc != crtc)
+ if (old_conn_state->crtc != &crtc->base)
continue;
intel_opregion_notify_encoder(encoder, false);
@@ -6118,19 +6218,19 @@ static void intel_encoders_disable(struct drm_crtc *crtc,
}
}
-static void intel_encoders_post_disable(struct drm_crtc *crtc,
+static void intel_encoders_post_disable(struct intel_crtc *crtc,
struct intel_crtc_state *old_crtc_state,
- struct drm_atomic_state *old_state)
+ struct intel_atomic_state *state)
{
struct drm_connector_state *old_conn_state;
struct drm_connector *conn;
int i;
- for_each_old_connector_in_state(old_state, conn, old_conn_state, i) {
+ for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
struct intel_encoder *encoder =
to_intel_encoder(old_conn_state->best_encoder);
- if (old_conn_state->crtc != crtc)
+ if (old_conn_state->crtc != &crtc->base)
continue;
if (encoder->post_disable)
@@ -6138,19 +6238,19 @@ static void intel_encoders_post_disable(struct drm_crtc *crtc,
}
}
-static void intel_encoders_post_pll_disable(struct drm_crtc *crtc,
+static void intel_encoders_post_pll_disable(struct intel_crtc *crtc,
struct intel_crtc_state *old_crtc_state,
- struct drm_atomic_state *old_state)
+ struct intel_atomic_state *state)
{
struct drm_connector_state *old_conn_state;
struct drm_connector *conn;
int i;
- for_each_old_connector_in_state(old_state, conn, old_conn_state, i) {
+ for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
struct intel_encoder *encoder =
to_intel_encoder(old_conn_state->best_encoder);
- if (old_conn_state->crtc != crtc)
+ if (old_conn_state->crtc != &crtc->base)
continue;
if (encoder->post_pll_disable)
@@ -6158,19 +6258,19 @@ static void intel_encoders_post_pll_disable(struct drm_crtc *crtc,
}
}
-static void intel_encoders_update_pipe(struct drm_crtc *crtc,
+static void intel_encoders_update_pipe(struct intel_crtc *crtc,
struct intel_crtc_state *crtc_state,
- struct drm_atomic_state *old_state)
+ struct intel_atomic_state *state)
{
struct drm_connector_state *conn_state;
struct drm_connector *conn;
int i;
- for_each_new_connector_in_state(old_state, conn, conn_state, i) {
+ for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
struct intel_encoder *encoder =
to_intel_encoder(conn_state->best_encoder);
- if (conn_state->crtc != crtc)
+ if (conn_state->crtc != &crtc->base)
continue;
if (encoder->update_pipe)
@@ -6187,15 +6287,13 @@ static void intel_disable_primary_plane(const struct intel_crtc_state *crtc_stat
}
static void ironlake_crtc_enable(struct intel_crtc_state *pipe_config,
- struct drm_atomic_state *old_state)
+ struct intel_atomic_state *state)
{
struct drm_crtc *crtc = pipe_config->base.crtc;
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
- struct intel_atomic_state *old_intel_state =
- to_intel_atomic_state(old_state);
if (WARN_ON(intel_crtc->active))
return;
@@ -6231,7 +6329,7 @@ static void ironlake_crtc_enable(struct intel_crtc_state *pipe_config,
intel_crtc->active = true;
- intel_encoders_pre_enable(crtc, pipe_config, old_state);
+ intel_encoders_pre_enable(intel_crtc, pipe_config, state);
if (pipe_config->has_pch_encoder) {
/* Note: FDI PLL enabling _must_ be done before we enable the
@@ -6255,16 +6353,16 @@ static void ironlake_crtc_enable(struct intel_crtc_state *pipe_config,
intel_disable_primary_plane(pipe_config);
if (dev_priv->display.initial_watermarks != NULL)
- dev_priv->display.initial_watermarks(old_intel_state, pipe_config);
+ dev_priv->display.initial_watermarks(state, pipe_config);
intel_enable_pipe(pipe_config);
if (pipe_config->has_pch_encoder)
- ironlake_pch_enable(old_intel_state, pipe_config);
+ ironlake_pch_enable(state, pipe_config);
assert_vblank_disabled(crtc);
intel_crtc_vblank_on(pipe_config);
- intel_encoders_enable(crtc, pipe_config, old_state);
+ intel_encoders_enable(intel_crtc, pipe_config, state);
if (HAS_PCH_CPT(dev_priv))
cpt_verify_modeset(dev, intel_crtc->pipe);
@@ -6310,33 +6408,37 @@ static void icl_pipe_mbus_enable(struct intel_crtc *crtc)
u32 val;
val = MBUS_DBOX_A_CREDIT(2);
- val |= MBUS_DBOX_BW_CREDIT(1);
- val |= MBUS_DBOX_B_CREDIT(8);
+
+ if (INTEL_GEN(dev_priv) >= 12) {
+ val |= MBUS_DBOX_BW_CREDIT(2);
+ val |= MBUS_DBOX_B_CREDIT(12);
+ } else {
+ val |= MBUS_DBOX_BW_CREDIT(1);
+ val |= MBUS_DBOX_B_CREDIT(8);
+ }
I915_WRITE(PIPE_MBUS_DBOX_CTL(pipe), val);
}
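
/*
 * Net effect of the branch above: gen12 bumps the MBUS bandwidth
 * credits to 2 and the B credits to 12, while gen11 keeps BW=1 / B=8;
 * the A credits stay at 2 on both.
 */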
static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
- struct drm_atomic_state *old_state)
+ struct intel_atomic_state *state)
{
struct drm_crtc *crtc = pipe_config->base.crtc;
struct drm_i915_private *dev_priv = to_i915(crtc->dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe, hsw_workaround_pipe;
enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
- struct intel_atomic_state *old_intel_state =
- to_intel_atomic_state(old_state);
bool psl_clkgate_wa;
if (WARN_ON(intel_crtc->active))
return;
- intel_encoders_pre_pll_enable(crtc, pipe_config, old_state);
+ intel_encoders_pre_pll_enable(intel_crtc, pipe_config, state);
if (pipe_config->shared_dpll)
intel_enable_shared_dpll(pipe_config);
- intel_encoders_pre_enable(crtc, pipe_config, old_state);
+ intel_encoders_pre_enable(intel_crtc, pipe_config, state);
if (intel_crtc_has_dp_encoder(pipe_config))
intel_dp_set_m_n(pipe_config, M1_N1);
@@ -6394,7 +6496,7 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
intel_ddi_enable_transcoder_func(pipe_config);
if (dev_priv->display.initial_watermarks != NULL)
- dev_priv->display.initial_watermarks(old_intel_state, pipe_config);
+ dev_priv->display.initial_watermarks(state, pipe_config);
if (INTEL_GEN(dev_priv) >= 11)
icl_pipe_mbus_enable(intel_crtc);
@@ -6404,7 +6506,7 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
intel_enable_pipe(pipe_config);
if (pipe_config->has_pch_encoder)
- lpt_pch_enable(old_intel_state, pipe_config);
+ lpt_pch_enable(state, pipe_config);
if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DP_MST))
intel_ddi_set_vc_payload_alloc(pipe_config, true);
@@ -6412,7 +6514,7 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
assert_vblank_disabled(crtc);
intel_crtc_vblank_on(pipe_config);
- intel_encoders_enable(crtc, pipe_config, old_state);
+ intel_encoders_enable(intel_crtc, pipe_config, state);
if (psl_clkgate_wa) {
intel_wait_for_vblank(dev_priv, pipe);
@@ -6444,7 +6546,7 @@ static void ironlake_pfit_disable(const struct intel_crtc_state *old_crtc_state)
}
static void ironlake_crtc_disable(struct intel_crtc_state *old_crtc_state,
- struct drm_atomic_state *old_state)
+ struct intel_atomic_state *state)
{
struct drm_crtc *crtc = old_crtc_state->base.crtc;
struct drm_device *dev = crtc->dev;
@@ -6460,7 +6562,7 @@ static void ironlake_crtc_disable(struct intel_crtc_state *old_crtc_state,
intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
- intel_encoders_disable(crtc, old_crtc_state, old_state);
+ intel_encoders_disable(intel_crtc, old_crtc_state, state);
drm_crtc_vblank_off(crtc);
assert_vblank_disabled(crtc);
@@ -6472,7 +6574,7 @@ static void ironlake_crtc_disable(struct intel_crtc_state *old_crtc_state,
if (old_crtc_state->has_pch_encoder)
ironlake_fdi_disable(crtc);
- intel_encoders_post_disable(crtc, old_crtc_state, old_state);
+ intel_encoders_post_disable(intel_crtc, old_crtc_state, state);
if (old_crtc_state->has_pch_encoder) {
ironlake_disable_pch_transcoder(dev_priv, pipe);
@@ -6503,14 +6605,14 @@ static void ironlake_crtc_disable(struct intel_crtc_state *old_crtc_state,
}
static void haswell_crtc_disable(struct intel_crtc_state *old_crtc_state,
- struct drm_atomic_state *old_state)
+ struct intel_atomic_state *state)
{
struct drm_crtc *crtc = old_crtc_state->base.crtc;
struct drm_i915_private *dev_priv = to_i915(crtc->dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
- intel_encoders_disable(crtc, old_crtc_state, old_state);
+ intel_encoders_disable(intel_crtc, old_crtc_state, state);
drm_crtc_vblank_off(crtc);
assert_vblank_disabled(crtc);
@@ -6532,9 +6634,9 @@ static void haswell_crtc_disable(struct intel_crtc_state *old_crtc_state,
else
ironlake_pfit_disable(old_crtc_state);
- intel_encoders_post_disable(crtc, old_crtc_state, old_state);
+ intel_encoders_post_disable(intel_crtc, old_crtc_state, state);
- intel_encoders_post_pll_disable(crtc, old_crtc_state, old_state);
+ intel_encoders_post_pll_disable(intel_crtc, old_crtc_state, state);
}
static void i9xx_pfit_enable(const struct intel_crtc_state *crtc_state)
@@ -6560,33 +6662,47 @@ static void i9xx_pfit_enable(const struct intel_crtc_state *crtc_state)
I915_WRITE(BCLRPAT(crtc->pipe), 0);
}
-bool intel_port_is_combophy(struct drm_i915_private *dev_priv, enum port port)
+bool intel_phy_is_combo(struct drm_i915_private *dev_priv, enum phy phy)
{
- if (port == PORT_NONE)
+ if (phy == PHY_NONE)
return false;
if (IS_ELKHARTLAKE(dev_priv))
- return port <= PORT_C;
+ return phy <= PHY_C;
if (INTEL_GEN(dev_priv) >= 11)
- return port <= PORT_B;
+ return phy <= PHY_B;
return false;
}
-bool intel_port_is_tc(struct drm_i915_private *dev_priv, enum port port)
+bool intel_phy_is_tc(struct drm_i915_private *dev_priv, enum phy phy)
{
+ if (INTEL_GEN(dev_priv) >= 12)
+ return phy >= PHY_D && phy <= PHY_I;
+
if (INTEL_GEN(dev_priv) >= 11 && !IS_ELKHARTLAKE(dev_priv))
- return port >= PORT_C && port <= PORT_F;
+ return phy >= PHY_C && phy <= PHY_F;
return false;
}
+enum phy intel_port_to_phy(struct drm_i915_private *i915, enum port port)
+{
+ if (IS_ELKHARTLAKE(i915) && port == PORT_D)
+ return PHY_A;
+
+ return (enum phy)port;
+}
+
enum tc_port intel_port_to_tc(struct drm_i915_private *dev_priv, enum port port)
{
- if (!intel_port_is_tc(dev_priv, port))
+ if (!intel_phy_is_tc(dev_priv, intel_port_to_phy(dev_priv, port)))
return PORT_TC_NONE;
+ if (INTEL_GEN(dev_priv) >= 12)
+ return port - PORT_D;
+
return port - PORT_C;
}
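
/*
 * Worked examples of the port mappings above:
 *
 *   EHL PORT_D -> PHY_A (the special case above)
 *   ICL PORT_C -> PHY_C, tc_port = PORT_C - PORT_C = first TC port
 *   TGL PORT_D -> PHY_D, tc_port = PORT_D - PORT_D = first TC port
 *   TGL PORT_I -> PHY_I, tc_port = PORT_I - PORT_D = sixth TC port
 */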
@@ -6614,6 +6730,26 @@ enum intel_display_power_domain intel_port_to_power_domain(enum port port)
enum intel_display_power_domain
intel_aux_power_domain(struct intel_digital_port *dig_port)
{
+ struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
+ enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port);
+
+ if (intel_phy_is_tc(dev_priv, phy) &&
+ dig_port->tc_mode == TC_PORT_TBT_ALT) {
+ switch (dig_port->aux_ch) {
+ case AUX_CH_C:
+ return POWER_DOMAIN_AUX_TBT1;
+ case AUX_CH_D:
+ return POWER_DOMAIN_AUX_TBT2;
+ case AUX_CH_E:
+ return POWER_DOMAIN_AUX_TBT3;
+ case AUX_CH_F:
+ return POWER_DOMAIN_AUX_TBT4;
+ default:
+ MISSING_CASE(dig_port->aux_ch);
+ return POWER_DOMAIN_AUX_TBT1;
+ }
+ }
+
switch (dig_port->aux_ch) {
case AUX_CH_A:
return POWER_DOMAIN_AUX_A;
@@ -6633,14 +6769,12 @@ intel_aux_power_domain(struct intel_digital_port *dig_port)
}
}
-static u64 get_crtc_power_domains(struct drm_crtc *crtc,
- struct intel_crtc_state *crtc_state)
+static u64 get_crtc_power_domains(struct intel_crtc_state *crtc_state)
{
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct drm_encoder *encoder;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- enum pipe pipe = intel_crtc->pipe;
+ enum pipe pipe = crtc->pipe;
u64 mask;
enum transcoder transcoder = crtc_state->cpu_transcoder;
@@ -6653,7 +6787,8 @@ static u64 get_crtc_power_domains(struct drm_crtc *crtc,
crtc_state->pch_pfit.force_thru)
mask |= BIT_ULL(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe));
- drm_for_each_encoder_mask(encoder, dev, crtc_state->base.encoder_mask) {
+ drm_for_each_encoder_mask(encoder, &dev_priv->drm,
+ crtc_state->base.encoder_mask) {
struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
mask |= BIT_ULL(intel_encoder->power_domain);
@@ -6669,17 +6804,16 @@ static u64 get_crtc_power_domains(struct drm_crtc *crtc,
}
static u64
-modeset_get_crtc_power_domains(struct drm_crtc *crtc,
- struct intel_crtc_state *crtc_state)
+modeset_get_crtc_power_domains(struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum intel_display_power_domain domain;
u64 domains, new_domains, old_domains;
- old_domains = intel_crtc->enabled_power_domains;
- intel_crtc->enabled_power_domains = new_domains =
- get_crtc_power_domains(crtc, crtc_state);
+ old_domains = crtc->enabled_power_domains;
+ crtc->enabled_power_domains = new_domains =
+ get_crtc_power_domains(crtc_state);
domains = new_domains & ~old_domains;
@@ -6699,10 +6833,8 @@ static void modeset_put_power_domains(struct drm_i915_private *dev_priv,
}
static void valleyview_crtc_enable(struct intel_crtc_state *pipe_config,
- struct drm_atomic_state *old_state)
+ struct intel_atomic_state *state)
{
- struct intel_atomic_state *old_intel_state =
- to_intel_atomic_state(old_state);
struct drm_crtc *crtc = pipe_config->base.crtc;
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
@@ -6729,7 +6861,7 @@ static void valleyview_crtc_enable(struct intel_crtc_state *pipe_config,
intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
- intel_encoders_pre_pll_enable(crtc, pipe_config, old_state);
+ intel_encoders_pre_pll_enable(intel_crtc, pipe_config, state);
if (IS_CHERRYVIEW(dev_priv)) {
chv_prepare_pll(intel_crtc, pipe_config);
@@ -6739,7 +6871,7 @@ static void valleyview_crtc_enable(struct intel_crtc_state *pipe_config,
vlv_enable_pll(intel_crtc, pipe_config);
}
- intel_encoders_pre_enable(crtc, pipe_config, old_state);
+ intel_encoders_pre_enable(intel_crtc, pipe_config, state);
i9xx_pfit_enable(pipe_config);
@@ -6748,14 +6880,13 @@ static void valleyview_crtc_enable(struct intel_crtc_state *pipe_config,
/* update DSPCNTR to configure gamma for pipe bottom color */
intel_disable_primary_plane(pipe_config);
- dev_priv->display.initial_watermarks(old_intel_state,
- pipe_config);
+ dev_priv->display.initial_watermarks(state, pipe_config);
intel_enable_pipe(pipe_config);
assert_vblank_disabled(crtc);
intel_crtc_vblank_on(pipe_config);
- intel_encoders_enable(crtc, pipe_config, old_state);
+ intel_encoders_enable(intel_crtc, pipe_config, state);
}
static void i9xx_set_pll_dividers(const struct intel_crtc_state *crtc_state)
@@ -6768,10 +6899,8 @@ static void i9xx_set_pll_dividers(const struct intel_crtc_state *crtc_state)
}
static void i9xx_crtc_enable(struct intel_crtc_state *pipe_config,
- struct drm_atomic_state *old_state)
+ struct intel_atomic_state *state)
{
- struct intel_atomic_state *old_intel_state =
- to_intel_atomic_state(old_state);
struct drm_crtc *crtc = pipe_config->base.crtc;
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
@@ -6796,7 +6925,7 @@ static void i9xx_crtc_enable(struct intel_crtc_state *pipe_config,
if (!IS_GEN(dev_priv, 2))
intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
- intel_encoders_pre_enable(crtc, pipe_config, old_state);
+ intel_encoders_pre_enable(intel_crtc, pipe_config, state);
i9xx_enable_pll(intel_crtc, pipe_config);
@@ -6808,7 +6937,7 @@ static void i9xx_crtc_enable(struct intel_crtc_state *pipe_config,
intel_disable_primary_plane(pipe_config);
if (dev_priv->display.initial_watermarks != NULL)
- dev_priv->display.initial_watermarks(old_intel_state,
+ dev_priv->display.initial_watermarks(state,
pipe_config);
else
intel_update_watermarks(intel_crtc);
@@ -6817,7 +6946,7 @@ static void i9xx_crtc_enable(struct intel_crtc_state *pipe_config,
assert_vblank_disabled(crtc);
intel_crtc_vblank_on(pipe_config);
- intel_encoders_enable(crtc, pipe_config, old_state);
+ intel_encoders_enable(intel_crtc, pipe_config, state);
}
static void i9xx_pfit_disable(const struct intel_crtc_state *old_crtc_state)
@@ -6836,7 +6965,7 @@ static void i9xx_pfit_disable(const struct intel_crtc_state *old_crtc_state)
}
static void i9xx_crtc_disable(struct intel_crtc_state *old_crtc_state,
- struct drm_atomic_state *old_state)
+ struct intel_atomic_state *state)
{
struct drm_crtc *crtc = old_crtc_state->base.crtc;
struct drm_device *dev = crtc->dev;
@@ -6851,7 +6980,7 @@ static void i9xx_crtc_disable(struct intel_crtc_state *old_crtc_state,
if (IS_GEN(dev_priv, 2))
intel_wait_for_vblank(dev_priv, pipe);
- intel_encoders_disable(crtc, old_crtc_state, old_state);
+ intel_encoders_disable(intel_crtc, old_crtc_state, state);
drm_crtc_vblank_off(crtc);
assert_vblank_disabled(crtc);
@@ -6860,7 +6989,7 @@ static void i9xx_crtc_disable(struct intel_crtc_state *old_crtc_state,
i9xx_pfit_disable(old_crtc_state);
- intel_encoders_post_disable(crtc, old_crtc_state, old_state);
+ intel_encoders_post_disable(intel_crtc, old_crtc_state, state);
if (!intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DSI)) {
if (IS_CHERRYVIEW(dev_priv))
@@ -6871,7 +7000,7 @@ static void i9xx_crtc_disable(struct intel_crtc_state *old_crtc_state,
i9xx_disable_pll(old_crtc_state);
}
- intel_encoders_post_pll_disable(crtc, old_crtc_state, old_state);
+ intel_encoders_post_pll_disable(intel_crtc, old_crtc_state, state);
if (!IS_GEN(dev_priv, 2))
intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
@@ -6925,7 +7054,7 @@ static void intel_crtc_disable_noatomic(struct drm_crtc *crtc,
WARN_ON(IS_ERR(crtc_state) || ret);
- dev_priv->display.crtc_disable(crtc_state, state);
+ dev_priv->display.crtc_disable(crtc_state, to_intel_atomic_state(state));
drm_atomic_state_put(state);
@@ -6988,7 +7117,7 @@ void intel_encoder_destroy(struct drm_encoder *encoder)
/* Cross check the actual hw state with our own modeset state tracking (and it's
* internal consistency). */
-static void intel_connector_verify_state(struct drm_crtc_state *crtc_state,
+static void intel_connector_verify_state(struct intel_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
{
struct intel_connector *connector = to_intel_connector(conn_state->connector);
@@ -7006,7 +7135,7 @@ static void intel_connector_verify_state(struct drm_crtc_state *crtc_state,
if (!crtc_state)
return;
- I915_STATE_WARN(!crtc_state->active,
+ I915_STATE_WARN(!crtc_state->base.active,
"connector is active, but attached crtc isn't\n");
if (!encoder || encoder->type == INTEL_OUTPUT_DP_MST)
@@ -7018,7 +7147,7 @@ static void intel_connector_verify_state(struct drm_crtc_state *crtc_state,
I915_STATE_WARN(conn_state->crtc != encoder->base.crtc,
"attached encoder crtc differs from connector crtc\n");
} else {
- I915_STATE_WARN(crtc_state && crtc_state->active,
+ I915_STATE_WARN(crtc_state && crtc_state->base.active,
"attached crtc is active, but connector isn't\n");
I915_STATE_WARN(!crtc_state && conn_state->best_encoder,
"best encoder set without crtc!\n");
@@ -9484,6 +9613,8 @@ static int ironlake_crtc_compute_clock(struct intel_crtc *crtc,
struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_atomic_state *state =
+ to_intel_atomic_state(crtc_state->base.state);
const struct intel_limit *limit;
int refclk = 120000;
@@ -9525,7 +9656,7 @@ static int ironlake_crtc_compute_clock(struct intel_crtc *crtc,
ironlake_compute_dpll(crtc, crtc_state, NULL);
- if (!intel_get_shared_dpll(crtc_state, NULL)) {
+ if (!intel_reserve_shared_dplls(state, crtc, NULL)) {
DRM_DEBUG_KMS("failed to find PLL for pipe %c\n",
pipe_name(crtc->pipe));
return -EINVAL;
@@ -9906,7 +10037,7 @@ static int haswell_crtc_compute_clock(struct intel_crtc *crtc,
struct intel_encoder *encoder =
intel_get_crtc_new_encoder(state, crtc_state);
- if (!intel_get_shared_dpll(crtc_state, encoder)) {
+ if (!intel_reserve_shared_dplls(state, crtc, encoder)) {
DRM_DEBUG_KMS("failed to find PLL for pipe %c\n",
pipe_name(crtc->pipe));
return -EINVAL;
@@ -9936,22 +10067,37 @@ static void icelake_get_ddi_pll(struct drm_i915_private *dev_priv,
enum port port,
struct intel_crtc_state *pipe_config)
{
+ enum phy phy = intel_port_to_phy(dev_priv, port);
+ enum icl_port_dpll_id port_dpll_id;
enum intel_dpll_id id;
u32 temp;
- /* TODO: TBT pll not implemented. */
- if (intel_port_is_combophy(dev_priv, port)) {
- temp = I915_READ(DPCLKA_CFGCR0_ICL) &
- DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port);
- id = temp >> DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(port);
- } else if (intel_port_is_tc(dev_priv, port)) {
- id = icl_tc_port_to_pll_id(intel_port_to_tc(dev_priv, port));
+ if (intel_phy_is_combo(dev_priv, phy)) {
+ temp = I915_READ(ICL_DPCLKA_CFGCR0) &
+ ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy);
+ id = temp >> ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy);
+ port_dpll_id = ICL_PORT_DPLL_DEFAULT;
+ } else if (intel_phy_is_tc(dev_priv, phy)) {
+ u32 clk_sel = I915_READ(DDI_CLK_SEL(port)) & DDI_CLK_SEL_MASK;
+
+ if (clk_sel == DDI_CLK_SEL_MG) {
+ id = icl_tc_port_to_pll_id(intel_port_to_tc(dev_priv,
+ port));
+ port_dpll_id = ICL_PORT_DPLL_MG_PHY;
+ } else {
+ WARN_ON(clk_sel < DDI_CLK_SEL_TBT_162);
+ id = DPLL_ID_ICL_TBTPLL;
+ port_dpll_id = ICL_PORT_DPLL_DEFAULT;
+ }
} else {
WARN(1, "Invalid port %x\n", port);
return;
}
- pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
+ pipe_config->icl_port_dplls[port_dpll_id].pll =
+ intel_get_shared_dpll_by_id(dev_priv, id);
+
+ icl_set_active_port_dpll(pipe_config, port_dpll_id);
}
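
/*
 * Readout summary for the function above: combo PHYs take the DPLL id
 * from ICL_DPCLKA_CFGCR0 into the default slot; Type-C PHYs inspect
 * DDI_CLK_SEL -- DDI_CLK_SEL_MG selects the MG PHY PLL slot, while any
 * TBT rate (>= DDI_CLK_SEL_TBT_162) selects the TBT PLL in the default
 * slot. icl_set_active_port_dpll() then points the crtc state's active
 * shared_dpll at the chosen slot.
 */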
static void bxt_get_ddi_pll(struct drm_i915_private *dev_priv,
@@ -10191,7 +10337,10 @@ static void haswell_get_ddi_port_state(struct intel_crtc *crtc,
tmp = I915_READ(TRANS_DDI_FUNC_CTL(pipe_config->cpu_transcoder));
- port = (tmp & TRANS_DDI_PORT_MASK) >> TRANS_DDI_PORT_SHIFT;
+ if (INTEL_GEN(dev_priv) >= 12)
+ port = TGL_TRANS_DDI_FUNC_CTL_VAL_TO_PORT(tmp);
+ else
+ port = TRANS_DDI_FUNC_CTL_VAL_TO_PORT(tmp);
if (INTEL_GEN(dev_priv) >= 11)
icelake_get_ddi_pll(dev_priv, port, pipe_config);
@@ -11297,7 +11446,7 @@ static void intel_crtc_destroy(struct drm_crtc *crtc)
*
* Returns true or false.
*/
-static bool intel_wm_need_update(struct intel_plane_state *cur,
+static bool intel_wm_need_update(const struct intel_plane_state *cur,
struct intel_plane_state *new)
{
/* Update watermarks on tiling or size changes. */
@@ -11329,33 +11478,28 @@ static bool needs_scaling(const struct intel_plane_state *state)
}
int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_state,
- struct drm_crtc_state *crtc_state,
+ struct intel_crtc_state *crtc_state,
const struct intel_plane_state *old_plane_state,
- struct drm_plane_state *plane_state)
+ struct intel_plane_state *plane_state)
{
- struct intel_crtc_state *pipe_config = to_intel_crtc_state(crtc_state);
- struct drm_crtc *crtc = crtc_state->crtc;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct intel_plane *plane = to_intel_plane(plane_state->plane);
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
bool mode_changed = needs_modeset(crtc_state);
bool was_crtc_enabled = old_crtc_state->base.active;
- bool is_crtc_enabled = crtc_state->active;
+ bool is_crtc_enabled = crtc_state->base.active;
bool turn_off, turn_on, visible, was_visible;
- struct drm_framebuffer *fb = plane_state->fb;
+ struct drm_framebuffer *fb = plane_state->base.fb;
int ret;
if (INTEL_GEN(dev_priv) >= 9 && plane->id != PLANE_CURSOR) {
- ret = skl_update_scaler_plane(
- to_intel_crtc_state(crtc_state),
- to_intel_plane_state(plane_state));
+ ret = skl_update_scaler_plane(crtc_state, plane_state);
if (ret)
return ret;
}
was_visible = old_plane_state->base.visible;
- visible = plane_state->visible;
+ visible = plane_state->base.visible;
if (!was_crtc_enabled && WARN_ON(was_visible))
was_visible = false;
@@ -11371,22 +11515,22 @@ int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_stat
* only combine the results from all planes in the current place?
*/
if (!is_crtc_enabled) {
- plane_state->visible = visible = false;
- to_intel_crtc_state(crtc_state)->active_planes &= ~BIT(plane->id);
- to_intel_crtc_state(crtc_state)->data_rate[plane->id] = 0;
+ plane_state->base.visible = visible = false;
+ crtc_state->active_planes &= ~BIT(plane->id);
+ crtc_state->data_rate[plane->id] = 0;
}
if (!was_visible && !visible)
return 0;
if (fb != old_plane_state->base.fb)
- pipe_config->fb_changed = true;
+ crtc_state->fb_changed = true;
turn_off = was_visible && (!visible || mode_changed);
turn_on = visible && (!was_visible || mode_changed);
DRM_DEBUG_ATOMIC("[CRTC:%d:%s] has [PLANE:%d:%s] with fb %i\n",
- intel_crtc->base.base.id, intel_crtc->base.name,
+ crtc->base.base.id, crtc->base.name,
plane->base.base.id, plane->base.name,
fb ? fb->base.id : -1);
@@ -11397,29 +11541,28 @@ int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_stat
if (turn_on) {
if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
- pipe_config->update_wm_pre = true;
+ crtc_state->update_wm_pre = true;
/* must disable cxsr around plane enable/disable */
if (plane->id != PLANE_CURSOR)
- pipe_config->disable_cxsr = true;
+ crtc_state->disable_cxsr = true;
} else if (turn_off) {
if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
- pipe_config->update_wm_post = true;
+ crtc_state->update_wm_post = true;
/* must disable cxsr around plane enable/disable */
if (plane->id != PLANE_CURSOR)
- pipe_config->disable_cxsr = true;
- } else if (intel_wm_need_update(to_intel_plane_state(plane->base.state),
- to_intel_plane_state(plane_state))) {
+ crtc_state->disable_cxsr = true;
+ } else if (intel_wm_need_update(old_plane_state, plane_state)) {
if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv)) {
/* FIXME bollocks */
- pipe_config->update_wm_pre = true;
- pipe_config->update_wm_post = true;
+ crtc_state->update_wm_pre = true;
+ crtc_state->update_wm_post = true;
}
}
if (visible || was_visible)
- pipe_config->fb_bits |= plane->frontbuffer_bit;
+ crtc_state->fb_bits |= plane->frontbuffer_bit;
/*
* ILK/SNB DVSACNTR/Sprite Enable
@@ -11458,8 +11601,8 @@ int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_stat
(IS_GEN_RANGE(dev_priv, 5, 6) ||
IS_IVYBRIDGE(dev_priv)) &&
(turn_on || (!needs_scaling(old_plane_state) &&
- needs_scaling(to_intel_plane_state(plane_state)))))
- pipe_config->disable_lp_wm = true;
+ needs_scaling(plane_state))))
+ crtc_state->disable_lp_wm = true;
return 0;
}
@@ -11608,7 +11751,7 @@ static int intel_crtc_atomic_check(struct drm_crtc *crtc,
struct intel_crtc_state *pipe_config =
to_intel_crtc_state(crtc_state);
int ret;
- bool mode_changed = needs_modeset(crtc_state);
+ bool mode_changed = needs_modeset(pipe_config);
if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv) &&
mode_changed && !crtc_state->active)
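The needs_modeset() call sites throughout this patch change from taking &state->base to the intel state itself, which implies the helper was retyped. A sketch of the assumed wrapper after the change; the real definition lives elsewhere in the series:

/* Sketch (assumed): needs_modeset() now takes the intel type and
 * defers to the core helper on the embedded base state.
 */
static bool needs_modeset(const struct intel_crtc_state *state)
{
	return drm_atomic_crtc_needs_modeset(&state->base);
}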
@@ -12090,6 +12233,8 @@ clear_intel_crtc_state(struct intel_crtc_state *crtc_state)
saved_state->scaler_state = crtc_state->scaler_state;
saved_state->shared_dpll = crtc_state->shared_dpll;
saved_state->dpll_hw_state = crtc_state->dpll_hw_state;
+ memcpy(saved_state->icl_port_dplls, crtc_state->icl_port_dplls,
+ sizeof(saved_state->icl_port_dplls));
saved_state->crc_enabled = crtc_state->crc_enabled;
if (IS_G4X(dev_priv) ||
IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
@@ -12706,10 +12851,10 @@ static void intel_pipe_config_sanity_check(struct drm_i915_private *dev_priv,
}
}
-static void verify_wm_state(struct drm_crtc *crtc,
- struct drm_crtc_state *new_state)
+static void verify_wm_state(struct intel_crtc *crtc,
+ struct intel_crtc_state *new_crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct skl_hw_state {
struct skl_ddb_entry ddb_y[I915_MAX_PLANES];
struct skl_ddb_entry ddb_uv[I915_MAX_PLANES];
@@ -12719,21 +12864,20 @@ static void verify_wm_state(struct drm_crtc *crtc,
struct skl_ddb_allocation *sw_ddb;
struct skl_pipe_wm *sw_wm;
struct skl_ddb_entry *hw_ddb_entry, *sw_ddb_entry;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- const enum pipe pipe = intel_crtc->pipe;
+ const enum pipe pipe = crtc->pipe;
int plane, level, max_level = ilk_wm_max_level(dev_priv);
- if (INTEL_GEN(dev_priv) < 9 || !new_state->active)
+ if (INTEL_GEN(dev_priv) < 9 || !new_crtc_state->base.active)
return;
hw = kzalloc(sizeof(*hw), GFP_KERNEL);
if (!hw)
return;
- skl_pipe_wm_get_hw_state(intel_crtc, &hw->wm);
- sw_wm = &to_intel_crtc_state(new_state)->wm.skl.optimal;
+ skl_pipe_wm_get_hw_state(crtc, &hw->wm);
+ sw_wm = &new_crtc_state->wm.skl.optimal;
- skl_pipe_ddb_get_hw_state(intel_crtc, hw->ddb_y, hw->ddb_uv);
+ skl_pipe_ddb_get_hw_state(crtc, hw->ddb_y, hw->ddb_uv);
skl_ddb_get_hw_state(dev_priv, &hw->ddb);
sw_ddb = &dev_priv->wm.skl_hw.ddb;
@@ -12781,7 +12925,7 @@ static void verify_wm_state(struct drm_crtc *crtc,
/* DDB */
hw_ddb_entry = &hw->ddb_y[plane];
- sw_ddb_entry = &to_intel_crtc_state(new_state)->wm.skl.plane_ddb_y[plane];
+ sw_ddb_entry = &new_crtc_state->wm.skl.plane_ddb_y[plane];
if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
DRM_ERROR("mismatch in DDB state pipe %c plane %d (expected (%u,%u), found (%u,%u))\n",
@@ -12833,7 +12977,7 @@ static void verify_wm_state(struct drm_crtc *crtc,
/* DDB */
hw_ddb_entry = &hw->ddb_y[PLANE_CURSOR];
- sw_ddb_entry = &to_intel_crtc_state(new_state)->wm.skl.plane_ddb_y[PLANE_CURSOR];
+ sw_ddb_entry = &new_crtc_state->wm.skl.plane_ddb_y[PLANE_CURSOR];
if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
DRM_ERROR("mismatch in DDB state pipe %c cursor (expected (%u,%u), found (%u,%u))\n",
@@ -12847,23 +12991,22 @@ static void verify_wm_state(struct drm_crtc *crtc,
}
static void
-verify_connector_state(struct drm_device *dev,
- struct drm_atomic_state *state,
- struct drm_crtc *crtc)
+verify_connector_state(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
struct drm_connector *connector;
struct drm_connector_state *new_conn_state;
int i;
- for_each_new_connector_in_state(state, connector, new_conn_state, i) {
+ for_each_new_connector_in_state(&state->base, connector, new_conn_state, i) {
struct drm_encoder *encoder = connector->encoder;
- struct drm_crtc_state *crtc_state = NULL;
+ struct intel_crtc_state *crtc_state = NULL;
- if (new_conn_state->crtc != crtc)
+ if (new_conn_state->crtc != &crtc->base)
continue;
if (crtc)
- crtc_state = drm_atomic_get_new_crtc_state(state, new_conn_state->crtc);
+ crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
intel_connector_verify_state(crtc_state, new_conn_state);
@@ -12873,14 +13016,14 @@ verify_connector_state(struct drm_device *dev,
}
static void
-verify_encoder_state(struct drm_device *dev, struct drm_atomic_state *state)
+verify_encoder_state(struct drm_i915_private *dev_priv, struct intel_atomic_state *state)
{
struct intel_encoder *encoder;
struct drm_connector *connector;
struct drm_connector_state *old_conn_state, *new_conn_state;
int i;
- for_each_intel_encoder(dev, encoder) {
+ for_each_intel_encoder(&dev_priv->drm, encoder) {
bool enabled = false, found = false;
enum pipe pipe;
@@ -12888,7 +13031,7 @@ verify_encoder_state(struct drm_device *dev, struct drm_atomic_state *state)
encoder->base.base.id,
encoder->base.name);
- for_each_oldnew_connector_in_state(state, connector, old_conn_state,
+ for_each_oldnew_connector_in_state(&state->base, connector, old_conn_state,
new_conn_state, i) {
if (old_conn_state->best_encoder == &encoder->base)
found = true;
@@ -12922,50 +13065,49 @@ verify_encoder_state(struct drm_device *dev, struct drm_atomic_state *state)
}
static void
-verify_crtc_state(struct drm_crtc *crtc,
- struct drm_crtc_state *old_crtc_state,
- struct drm_crtc_state *new_crtc_state)
+verify_crtc_state(struct intel_crtc *crtc,
+ struct intel_crtc_state *old_crtc_state,
+ struct intel_crtc_state *new_crtc_state)
{
- struct drm_device *dev = crtc->dev;
+ struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_encoder *encoder;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct intel_crtc_state *pipe_config, *sw_config;
- struct drm_atomic_state *old_state;
+ struct intel_crtc_state *pipe_config;
+ struct drm_atomic_state *state;
bool active;
- old_state = old_crtc_state->state;
- __drm_atomic_helper_crtc_destroy_state(old_crtc_state);
- pipe_config = to_intel_crtc_state(old_crtc_state);
+ state = old_crtc_state->base.state;
+ __drm_atomic_helper_crtc_destroy_state(&old_crtc_state->base);
+ pipe_config = old_crtc_state;
memset(pipe_config, 0, sizeof(*pipe_config));
- pipe_config->base.crtc = crtc;
- pipe_config->base.state = old_state;
+ pipe_config->base.crtc = &crtc->base;
+ pipe_config->base.state = state;
- DRM_DEBUG_KMS("[CRTC:%d:%s]\n", crtc->base.id, crtc->name);
+ DRM_DEBUG_KMS("[CRTC:%d:%s]\n", crtc->base.base.id, crtc->base.name);
- active = dev_priv->display.get_pipe_config(intel_crtc, pipe_config);
+ active = dev_priv->display.get_pipe_config(crtc, pipe_config);
/* we keep both pipes enabled on 830 */
if (IS_I830(dev_priv))
- active = new_crtc_state->active;
+ active = new_crtc_state->base.active;
- I915_STATE_WARN(new_crtc_state->active != active,
+ I915_STATE_WARN(new_crtc_state->base.active != active,
"crtc active state doesn't match with hw state "
- "(expected %i, found %i)\n", new_crtc_state->active, active);
+ "(expected %i, found %i)\n", new_crtc_state->base.active, active);
- I915_STATE_WARN(intel_crtc->active != new_crtc_state->active,
+ I915_STATE_WARN(crtc->active != new_crtc_state->base.active,
"transitional active state does not match atomic hw state "
- "(expected %i, found %i)\n", new_crtc_state->active, intel_crtc->active);
+ "(expected %i, found %i)\n", new_crtc_state->base.active, crtc->active);
- for_each_encoder_on_crtc(dev, crtc, encoder) {
+ for_each_encoder_on_crtc(dev, &crtc->base, encoder) {
enum pipe pipe;
active = encoder->get_hw_state(encoder, &pipe);
- I915_STATE_WARN(active != new_crtc_state->active,
+ I915_STATE_WARN(active != new_crtc_state->base.active,
"[ENCODER:%i] active %i with crtc active %i\n",
- encoder->base.base.id, active, new_crtc_state->active);
+ encoder->base.base.id, active, new_crtc_state->base.active);
- I915_STATE_WARN(active && intel_crtc->pipe != pipe,
+ I915_STATE_WARN(active && crtc->pipe != pipe,
"Encoder connected to wrong pipe %c\n",
pipe_name(pipe));
@@ -12975,16 +13117,16 @@ verify_crtc_state(struct drm_crtc *crtc,
intel_crtc_compute_pixel_rate(pipe_config);
- if (!new_crtc_state->active)
+ if (!new_crtc_state->base.active)
return;
intel_pipe_config_sanity_check(dev_priv, pipe_config);
- sw_config = to_intel_crtc_state(new_crtc_state);
- if (!intel_pipe_config_compare(sw_config, pipe_config, false)) {
+ if (!intel_pipe_config_compare(new_crtc_state,
+ pipe_config, false)) {
I915_STATE_WARN(1, "pipe state doesn't match!\n");
intel_dump_pipe_config(pipe_config, NULL, "[hw state]");
- intel_dump_pipe_config(sw_config, NULL, "[sw state]");
+ intel_dump_pipe_config(new_crtc_state, NULL, "[sw state]");
}
}
@@ -13004,8 +13146,8 @@ intel_verify_planes(struct intel_atomic_state *state)
static void
verify_single_dpll_state(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll,
- struct drm_crtc *crtc,
- struct drm_crtc_state *new_state)
+ struct intel_crtc *crtc,
+ struct intel_crtc_state *new_crtc_state)
{
struct intel_dpll_hw_state dpll_hw_state;
unsigned int crtc_mask;
@@ -13035,16 +13177,16 @@ verify_single_dpll_state(struct drm_i915_private *dev_priv,
return;
}
- crtc_mask = drm_crtc_mask(crtc);
+ crtc_mask = drm_crtc_mask(&crtc->base);
- if (new_state->active)
+ if (new_crtc_state->base.active)
I915_STATE_WARN(!(pll->active_mask & crtc_mask),
"pll active mismatch (expected pipe %c in active mask 0x%02x)\n",
- pipe_name(drm_crtc_index(crtc)), pll->active_mask);
+ pipe_name(drm_crtc_index(&crtc->base)), pll->active_mask);
else
I915_STATE_WARN(pll->active_mask & crtc_mask,
"pll active mismatch (didn't expect pipe %c in active mask 0x%02x)\n",
- pipe_name(drm_crtc_index(crtc)), pll->active_mask);
+ pipe_name(drm_crtc_index(&crtc->base)), pll->active_mask);
I915_STATE_WARN(!(pll->state.crtc_mask & crtc_mask),
"pll enabled crtcs mismatch (expected 0x%x in 0x%02x)\n",
@@ -13057,51 +13199,47 @@ verify_single_dpll_state(struct drm_i915_private *dev_priv,
}
static void
-verify_shared_dpll_state(struct drm_device *dev, struct drm_crtc *crtc,
- struct drm_crtc_state *old_crtc_state,
- struct drm_crtc_state *new_crtc_state)
+verify_shared_dpll_state(struct intel_crtc *crtc,
+ struct intel_crtc_state *old_crtc_state,
+ struct intel_crtc_state *new_crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_crtc_state *old_state = to_intel_crtc_state(old_crtc_state);
- struct intel_crtc_state *new_state = to_intel_crtc_state(new_crtc_state);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- if (new_state->shared_dpll)
- verify_single_dpll_state(dev_priv, new_state->shared_dpll, crtc, new_crtc_state);
+ if (new_crtc_state->shared_dpll)
+ verify_single_dpll_state(dev_priv, new_crtc_state->shared_dpll, crtc, new_crtc_state);
- if (old_state->shared_dpll &&
- old_state->shared_dpll != new_state->shared_dpll) {
- unsigned int crtc_mask = drm_crtc_mask(crtc);
- struct intel_shared_dpll *pll = old_state->shared_dpll;
+ if (old_crtc_state->shared_dpll &&
+ old_crtc_state->shared_dpll != new_crtc_state->shared_dpll) {
+ unsigned int crtc_mask = drm_crtc_mask(&crtc->base);
+ struct intel_shared_dpll *pll = old_crtc_state->shared_dpll;
I915_STATE_WARN(pll->active_mask & crtc_mask,
"pll active mismatch (didn't expect pipe %c in active mask)\n",
- pipe_name(drm_crtc_index(crtc)));
+ pipe_name(drm_crtc_index(&crtc->base)));
I915_STATE_WARN(pll->state.crtc_mask & crtc_mask,
"pll enabled crtcs mismatch (found %x in enabled mask)\n",
- pipe_name(drm_crtc_index(crtc)));
+ pipe_name(drm_crtc_index(&crtc->base)));
}
}
static void
-intel_modeset_verify_crtc(struct drm_crtc *crtc,
- struct drm_atomic_state *state,
- struct drm_crtc_state *old_state,
- struct drm_crtc_state *new_state)
+intel_modeset_verify_crtc(struct intel_crtc *crtc,
+ struct intel_atomic_state *state,
+ struct intel_crtc_state *old_crtc_state,
+ struct intel_crtc_state *new_crtc_state)
{
- if (!needs_modeset(new_state) &&
- !to_intel_crtc_state(new_state)->update_pipe)
+ if (!needs_modeset(new_crtc_state) && !new_crtc_state->update_pipe)
return;
- verify_wm_state(crtc, new_state);
- verify_connector_state(crtc->dev, state, crtc);
- verify_crtc_state(crtc, old_state, new_state);
- verify_shared_dpll_state(crtc->dev, crtc, old_state, new_state);
+ verify_wm_state(crtc, new_crtc_state);
+ verify_connector_state(state, crtc);
+ verify_crtc_state(crtc, old_crtc_state, new_crtc_state);
+ verify_shared_dpll_state(crtc, old_crtc_state, new_crtc_state);
}
static void
-verify_disabled_dpll_state(struct drm_device *dev)
+verify_disabled_dpll_state(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
int i;
for (i = 0; i < dev_priv->num_shared_dpll; i++)
@@ -13109,12 +13247,12 @@ verify_disabled_dpll_state(struct drm_device *dev)
}
static void
-intel_modeset_verify_disabled(struct drm_device *dev,
- struct drm_atomic_state *state)
+intel_modeset_verify_disabled(struct drm_i915_private *dev_priv,
+ struct intel_atomic_state *state)
{
- verify_encoder_state(dev, state);
- verify_connector_state(dev, state, NULL);
- verify_disabled_dpll_state(dev);
+ verify_encoder_state(dev_priv, state);
+ verify_connector_state(state, NULL);
+ verify_disabled_dpll_state(dev_priv);
}
static void update_scanline_offset(const struct intel_crtc_state *crtc_state)
@@ -13168,27 +13306,18 @@ static void update_scanline_offset(const struct intel_crtc_state *crtc_state)
static void intel_modeset_clear_plls(struct intel_atomic_state *state)
{
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
- struct intel_crtc_state *old_crtc_state, *new_crtc_state;
+ struct intel_crtc_state *new_crtc_state;
struct intel_crtc *crtc;
int i;
if (!dev_priv->display.crtc_compute_clock)
return;
- for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
- new_crtc_state, i) {
- struct intel_shared_dpll *old_dpll =
- old_crtc_state->shared_dpll;
-
- if (!needs_modeset(&new_crtc_state->base))
- continue;
-
- new_crtc_state->shared_dpll = NULL;
-
- if (!old_dpll)
+ for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
+ if (!needs_modeset(new_crtc_state))
continue;
- intel_release_shared_dpll(old_dpll, crtc, &state->base);
+ intel_release_shared_dplls(state, crtc);
}
}
@@ -13210,7 +13339,7 @@ static int haswell_mode_set_planes_workaround(struct intel_atomic_state *state)
/* look at all crtcs that are going to be enabled during the modeset */
for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
if (!crtc_state->base.active ||
- !needs_modeset(&crtc_state->base))
+ !needs_modeset(crtc_state))
continue;
if (first_crtc_state) {
@@ -13235,7 +13364,7 @@ static int haswell_mode_set_planes_workaround(struct intel_atomic_state *state)
crtc_state->hsw_workaround_pipe = INVALID_PIPE;
if (!crtc_state->base.active ||
- needs_modeset(&crtc_state->base))
+ needs_modeset(crtc_state))
continue;
/* 2 or more enabled crtcs means no need for w/a */
@@ -13253,15 +13382,16 @@ static int haswell_mode_set_planes_workaround(struct intel_atomic_state *state)
return 0;
}
-static int intel_lock_all_pipes(struct drm_atomic_state *state)
+static int intel_lock_all_pipes(struct intel_atomic_state *state)
{
- struct drm_crtc *crtc;
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_crtc *crtc;
/* Add all pipes to the state */
- for_each_crtc(state->dev, crtc) {
- struct drm_crtc_state *crtc_state;
+ for_each_intel_crtc(&dev_priv->drm, crtc) {
+ struct intel_crtc_state *crtc_state;
- crtc_state = drm_atomic_get_crtc_state(state, crtc);
+ crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
if (IS_ERR(crtc_state))
return PTR_ERR(crtc_state);
}
@@ -13269,32 +13399,35 @@ static int intel_lock_all_pipes(struct drm_atomic_state *state)
return 0;
}
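intel_lock_all_pipes() contains no explicit locking because drm_atomic_get_crtc_state() acquires the crtc's modeset lock as a side effect of pulling the crtc into the state; grabbing every crtc state therefore serializes the commit against all pipes. A sketch of the intel wrapper it now goes through, assuming the usual thin-cast shape:

/* Sketch (assumed): thin cast around the core helper, which takes
 * the crtc lock before duplicating the state.
 */
static inline struct intel_crtc_state *
intel_atomic_get_crtc_state(struct drm_atomic_state *state,
			    struct intel_crtc *crtc)
{
	struct drm_crtc_state *crtc_state =
		drm_atomic_get_crtc_state(state, &crtc->base);

	if (IS_ERR(crtc_state))
		return ERR_CAST(crtc_state);

	return to_intel_crtc_state(crtc_state);
}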
-static int intel_modeset_all_pipes(struct drm_atomic_state *state)
+static int intel_modeset_all_pipes(struct intel_atomic_state *state)
{
- struct drm_crtc *crtc;
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_crtc *crtc;
/*
* Add all pipes to the state, and force
* a modeset on all the active ones.
*/
- for_each_crtc(state->dev, crtc) {
- struct drm_crtc_state *crtc_state;
+ for_each_intel_crtc(&dev_priv->drm, crtc) {
+ struct intel_crtc_state *crtc_state;
int ret;
- crtc_state = drm_atomic_get_crtc_state(state, crtc);
+ crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
if (IS_ERR(crtc_state))
return PTR_ERR(crtc_state);
- if (!crtc_state->active || needs_modeset(crtc_state))
+ if (!crtc_state->base.active || needs_modeset(crtc_state))
continue;
- crtc_state->mode_changed = true;
+ crtc_state->base.mode_changed = true;
- ret = drm_atomic_add_affected_connectors(state, crtc);
+ ret = drm_atomic_add_affected_connectors(&state->base,
+ &crtc->base);
if (ret)
return ret;
- ret = drm_atomic_add_affected_planes(state, crtc);
+ ret = drm_atomic_add_affected_planes(&state->base,
+ &crtc->base);
if (ret)
return ret;
}
@@ -13356,18 +13489,18 @@ static int intel_modeset_checks(struct intel_atomic_state *state)
*/
if (intel_cdclk_changed(&dev_priv->cdclk.logical,
&state->cdclk.logical)) {
- ret = intel_lock_all_pipes(&state->base);
+ ret = intel_lock_all_pipes(state);
if (ret < 0)
return ret;
}
if (is_power_of_2(state->active_crtcs)) {
- struct drm_crtc *crtc;
- struct drm_crtc_state *crtc_state;
+ struct intel_crtc *crtc;
+ struct intel_crtc_state *crtc_state;
pipe = ilog2(state->active_crtcs);
- crtc = &intel_get_crtc_for_pipe(dev_priv, pipe)->base;
- crtc_state = drm_atomic_get_new_crtc_state(&state->base, crtc);
+ crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
+ crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
if (crtc_state && needs_modeset(crtc_state))
pipe = INVALID_PIPE;
} else {
@@ -13379,14 +13512,14 @@ static int intel_modeset_checks(struct intel_atomic_state *state)
intel_cdclk_needs_cd2x_update(dev_priv,
&dev_priv->cdclk.actual,
&state->cdclk.actual)) {
- ret = intel_lock_all_pipes(&state->base);
+ ret = intel_lock_all_pipes(state);
if (ret < 0)
return ret;
state->cdclk.pipe = pipe;
} else if (intel_cdclk_needs_modeset(&dev_priv->cdclk.actual,
&state->cdclk.actual)) {
- ret = intel_modeset_all_pipes(&state->base);
+ ret = intel_modeset_all_pipes(state);
if (ret < 0)
return ret;
@@ -13478,7 +13611,7 @@ static int intel_atomic_check(struct drm_device *dev,
for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
new_crtc_state, i) {
- if (!needs_modeset(&new_crtc_state->base))
+ if (!needs_modeset(new_crtc_state))
continue;
if (!new_crtc_state->base.enable) {
@@ -13492,7 +13625,7 @@ static int intel_atomic_check(struct drm_device *dev,
intel_crtc_check_fastset(old_crtc_state, new_crtc_state);
- if (needs_modeset(&new_crtc_state->base))
+ if (needs_modeset(new_crtc_state))
any_ms = true;
}
@@ -13527,12 +13660,12 @@ static int intel_atomic_check(struct drm_device *dev,
for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
new_crtc_state, i) {
- if (!needs_modeset(&new_crtc_state->base) &&
+ if (!needs_modeset(new_crtc_state) &&
!new_crtc_state->update_pipe)
continue;
intel_dump_pipe_config(new_crtc_state, state,
- needs_modeset(&new_crtc_state->base) ?
+ needs_modeset(new_crtc_state) ?
"[modeset]" : "[fastset]");
}
@@ -13553,10 +13686,10 @@ static int intel_atomic_check(struct drm_device *dev,
return ret;
}
-static int intel_atomic_prepare_commit(struct drm_device *dev,
- struct drm_atomic_state *state)
+static int intel_atomic_prepare_commit(struct intel_atomic_state *state)
{
- return drm_atomic_helper_prepare_planes(dev, state);
+ return drm_atomic_helper_prepare_planes(state->base.dev,
+ &state->base);
}
u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc)
@@ -13567,60 +13700,57 @@ u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc)
if (!vblank->max_vblank_count)
return (u32)drm_crtc_accurate_vblank_count(&crtc->base);
- return dev->driver->get_vblank_counter(dev, crtc->pipe);
+ return crtc->base.funcs->get_vblank_counter(&crtc->base);
}
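Routing the counter read through crtc->base.funcs->get_vblank_counter works because the per-platform drm_crtc_funcs tables added later in this patch (see INTEL_CRTC_FUNCS below) now carry the vblank hooks that used to hang off the device-wide drm_driver. A sketch of what such a hook looks like, assuming a g4x-style frame counter register; the real implementations live in i915_irq.c:

/* Sketch (assumed): per-crtc hardware frame counter read. */
static u32 example_get_vblank_counter(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	enum pipe pipe = to_intel_crtc(crtc)->pipe;

	return I915_READ(PIPE_FRMCOUNT_G4X(pipe));
}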
-static void intel_update_crtc(struct drm_crtc *crtc,
- struct drm_atomic_state *state,
- struct drm_crtc_state *old_crtc_state,
- struct drm_crtc_state *new_crtc_state)
+static void intel_update_crtc(struct intel_crtc *crtc,
+ struct intel_atomic_state *state,
+ struct intel_crtc_state *old_crtc_state,
+ struct intel_crtc_state *new_crtc_state)
{
- struct drm_device *dev = crtc->dev;
+ struct drm_device *dev = state->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct intel_crtc_state *pipe_config = to_intel_crtc_state(new_crtc_state);
bool modeset = needs_modeset(new_crtc_state);
struct intel_plane_state *new_plane_state =
- intel_atomic_get_new_plane_state(to_intel_atomic_state(state),
- to_intel_plane(crtc->primary));
+ intel_atomic_get_new_plane_state(state,
+ to_intel_plane(crtc->base.primary));
if (modeset) {
- update_scanline_offset(pipe_config);
- dev_priv->display.crtc_enable(pipe_config, state);
+ update_scanline_offset(new_crtc_state);
+ dev_priv->display.crtc_enable(new_crtc_state, state);
/* vblanks work again, re-enable pipe CRC. */
- intel_crtc_enable_pipe_crc(intel_crtc);
+ intel_crtc_enable_pipe_crc(crtc);
} else {
- intel_pre_plane_update(to_intel_crtc_state(old_crtc_state),
- pipe_config);
+ intel_pre_plane_update(old_crtc_state, new_crtc_state);
- if (pipe_config->update_pipe)
- intel_encoders_update_pipe(crtc, pipe_config, state);
+ if (new_crtc_state->update_pipe)
+ intel_encoders_update_pipe(crtc, new_crtc_state, state);
}
- if (pipe_config->update_pipe && !pipe_config->enable_fbc)
- intel_fbc_disable(intel_crtc);
+ if (new_crtc_state->update_pipe && !new_crtc_state->enable_fbc)
+ intel_fbc_disable(crtc);
else if (new_plane_state)
- intel_fbc_enable(intel_crtc, pipe_config, new_plane_state);
+ intel_fbc_enable(crtc, new_crtc_state, new_plane_state);
- intel_begin_crtc_commit(to_intel_atomic_state(state), intel_crtc);
+ intel_begin_crtc_commit(state, crtc);
if (INTEL_GEN(dev_priv) >= 9)
- skl_update_planes_on_crtc(to_intel_atomic_state(state), intel_crtc);
+ skl_update_planes_on_crtc(state, crtc);
else
- i9xx_update_planes_on_crtc(to_intel_atomic_state(state), intel_crtc);
+ i9xx_update_planes_on_crtc(state, crtc);
- intel_finish_crtc_commit(to_intel_atomic_state(state), intel_crtc);
+ intel_finish_crtc_commit(state, crtc);
}
-static void intel_update_crtcs(struct drm_atomic_state *state)
+static void intel_update_crtcs(struct intel_atomic_state *state)
{
- struct drm_crtc *crtc;
- struct drm_crtc_state *old_crtc_state, *new_crtc_state;
+ struct intel_crtc *crtc;
+ struct intel_crtc_state *old_crtc_state, *new_crtc_state;
int i;
- for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
- if (!new_crtc_state->active)
+ for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
+ if (!new_crtc_state->base.active)
continue;
intel_update_crtc(crtc, state, old_crtc_state,
@@ -13628,26 +13758,23 @@ static void intel_update_crtcs(struct drm_atomic_state *state)
}
}
-static void skl_update_crtcs(struct drm_atomic_state *state)
+static void skl_update_crtcs(struct intel_atomic_state *state)
{
- struct drm_i915_private *dev_priv = to_i915(state->dev);
- struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
- struct drm_crtc *crtc;
- struct intel_crtc *intel_crtc;
- struct drm_crtc_state *old_crtc_state, *new_crtc_state;
- struct intel_crtc_state *cstate;
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_crtc *crtc;
+ struct intel_crtc_state *old_crtc_state, *new_crtc_state;
unsigned int updated = 0;
bool progress;
enum pipe pipe;
int i;
u8 hw_enabled_slices = dev_priv->wm.skl_hw.ddb.enabled_slices;
- u8 required_slices = intel_state->wm_results.ddb.enabled_slices;
+ u8 required_slices = state->wm_results.ddb.enabled_slices;
struct skl_ddb_entry entries[I915_MAX_PIPES] = {};
- for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i)
+ for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i)
/* ignore allocations for crtcs that have been turned off. */
- if (new_crtc_state->active)
- entries[i] = to_intel_crtc_state(old_crtc_state)->wm.skl.ddb;
+ if (new_crtc_state->base.active)
+ entries[i] = old_crtc_state->wm.skl.ddb;
/* If 2nd DBuf slice required, enable it here */
if (INTEL_GEN(dev_priv) >= 11 && required_slices > hw_enabled_slices)
@@ -13662,24 +13789,22 @@ static void skl_update_crtcs(struct drm_atomic_state *state)
do {
progress = false;
- for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
+ for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
bool vbl_wait = false;
- unsigned int cmask = drm_crtc_mask(crtc);
+ unsigned int cmask = drm_crtc_mask(&crtc->base);
- intel_crtc = to_intel_crtc(crtc);
- cstate = to_intel_crtc_state(new_crtc_state);
- pipe = intel_crtc->pipe;
+ pipe = crtc->pipe;
- if (updated & cmask || !cstate->base.active)
+ if (updated & cmask || !new_crtc_state->base.active)
continue;
- if (skl_ddb_allocation_overlaps(&cstate->wm.skl.ddb,
+ if (skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb,
entries,
INTEL_INFO(dev_priv)->num_pipes, i))
continue;
updated |= cmask;
- entries[i] = cstate->wm.skl.ddb;
+ entries[i] = new_crtc_state->wm.skl.ddb;
/*
* If this is an already active pipe, its DDB changed,
@@ -13687,10 +13812,10 @@ static void skl_update_crtcs(struct drm_atomic_state *state)
* then we need to wait for a vblank to pass for the
* new ddb allocation to take effect.
*/
- if (!skl_ddb_entry_equal(&cstate->wm.skl.ddb,
- &to_intel_crtc_state(old_crtc_state)->wm.skl.ddb) &&
- !new_crtc_state->active_changed &&
- intel_state->wm_results.dirty_pipes != updated)
+ if (!skl_ddb_entry_equal(&new_crtc_state->wm.skl.ddb,
+ &old_crtc_state->wm.skl.ddb) &&
+ !new_crtc_state->base.active_changed &&
+ state->wm_results.dirty_pipes != updated)
vbl_wait = true;
intel_update_crtc(crtc, state, old_crtc_state,
@@ -13736,18 +13861,21 @@ static void intel_atomic_commit_fence_wait(struct intel_atomic_state *intel_stat
for (;;) {
prepare_to_wait(&intel_state->commit_ready.wait,
&wait_fence, TASK_UNINTERRUPTIBLE);
- prepare_to_wait(&dev_priv->gpu_error.wait_queue,
+ prepare_to_wait(bit_waitqueue(&dev_priv->gt.reset.flags,
+ I915_RESET_MODESET),
&wait_reset, TASK_UNINTERRUPTIBLE);
- if (i915_sw_fence_done(&intel_state->commit_ready)
- || test_bit(I915_RESET_MODESET, &dev_priv->gpu_error.flags))
+ if (i915_sw_fence_done(&intel_state->commit_ready) ||
+ test_bit(I915_RESET_MODESET, &dev_priv->gt.reset.flags))
break;
schedule();
}
finish_wait(&intel_state->commit_ready.wait, &wait_fence);
- finish_wait(&dev_priv->gpu_error.wait_queue, &wait_reset);
+ finish_wait(bit_waitqueue(&dev_priv->gt.reset.flags,
+ I915_RESET_MODESET),
+ &wait_reset);
}
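Switching from the dedicated gpu_error wait_queue to bit_waitqueue() means the fence-wait now sleeps on the hashed waitqueue the kernel associates with the I915_RESET_MODESET bit of gt.reset.flags, so the reset path only has to wake that bit. A sketch of the assumed wake-up side; the actual reset code lives under gt/:

/* Sketch (assumed): the reset handler pairs with the wait above. */
clear_bit(I915_RESET_MODESET, &dev_priv->gt.reset.flags);
smp_mb__after_atomic();
wake_up_bit(&dev_priv->gt.reset.flags, I915_RESET_MODESET);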
static void intel_atomic_cleanup_work(struct work_struct *work)
@@ -13763,57 +13891,49 @@ static void intel_atomic_cleanup_work(struct work_struct *work)
intel_atomic_helper_free_state(i915);
}
-static void intel_atomic_commit_tail(struct drm_atomic_state *state)
+static void intel_atomic_commit_tail(struct intel_atomic_state *state)
{
- struct drm_device *dev = state->dev;
- struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
+ struct drm_device *dev = state->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- struct drm_crtc_state *old_crtc_state, *new_crtc_state;
- struct intel_crtc_state *new_intel_crtc_state, *old_intel_crtc_state;
- struct drm_crtc *crtc;
- struct intel_crtc *intel_crtc;
+ struct intel_crtc_state *new_crtc_state, *old_crtc_state;
+ struct intel_crtc *crtc;
u64 put_domains[I915_MAX_PIPES] = {};
intel_wakeref_t wakeref = 0;
int i;
- intel_atomic_commit_fence_wait(intel_state);
+ intel_atomic_commit_fence_wait(state);
- drm_atomic_helper_wait_for_dependencies(state);
+ drm_atomic_helper_wait_for_dependencies(&state->base);
- if (intel_state->modeset)
+ if (state->modeset)
wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET);
- for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
- old_intel_crtc_state = to_intel_crtc_state(old_crtc_state);
- new_intel_crtc_state = to_intel_crtc_state(new_crtc_state);
- intel_crtc = to_intel_crtc(crtc);
-
+ for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
if (needs_modeset(new_crtc_state) ||
- to_intel_crtc_state(new_crtc_state)->update_pipe) {
+ new_crtc_state->update_pipe) {
- put_domains[intel_crtc->pipe] =
- modeset_get_crtc_power_domains(crtc,
- new_intel_crtc_state);
+ put_domains[crtc->pipe] =
+ modeset_get_crtc_power_domains(new_crtc_state);
}
if (!needs_modeset(new_crtc_state))
continue;
- intel_pre_plane_update(old_intel_crtc_state, new_intel_crtc_state);
+ intel_pre_plane_update(old_crtc_state, new_crtc_state);
- if (old_crtc_state->active) {
- intel_crtc_disable_planes(intel_state, intel_crtc);
+ if (old_crtc_state->base.active) {
+ intel_crtc_disable_planes(state, crtc);
/*
* We need to disable pipe CRC before disabling the pipe,
* or we race against vblank off.
*/
- intel_crtc_disable_pipe_crc(intel_crtc);
+ intel_crtc_disable_pipe_crc(crtc);
- dev_priv->display.crtc_disable(old_intel_crtc_state, state);
- intel_crtc->active = false;
- intel_fbc_disable(intel_crtc);
- intel_disable_shared_dpll(old_intel_crtc_state);
+ dev_priv->display.crtc_disable(old_crtc_state, state);
+ crtc->active = false;
+ intel_fbc_disable(crtc);
+ intel_disable_shared_dpll(old_crtc_state);
/*
* Underruns don't always raise
@@ -13823,25 +13943,25 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
intel_check_pch_fifo_underruns(dev_priv);
/* FIXME unify this for all platforms */
- if (!new_crtc_state->active &&
+ if (!new_crtc_state->base.active &&
!HAS_GMCH(dev_priv) &&
dev_priv->display.initial_watermarks)
- dev_priv->display.initial_watermarks(intel_state,
- new_intel_crtc_state);
+ dev_priv->display.initial_watermarks(state,
+ new_crtc_state);
}
}
- /* FIXME: Eventually get rid of our intel_crtc->config pointer */
- for_each_new_crtc_in_state(state, crtc, new_crtc_state, i)
- to_intel_crtc(crtc)->config = to_intel_crtc_state(new_crtc_state);
+ /* FIXME: Eventually get rid of our crtc->config pointer */
+ for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
+ crtc->config = new_crtc_state;
- if (intel_state->modeset) {
- drm_atomic_helper_update_legacy_modeset_state(state->dev, state);
+ if (state->modeset) {
+ drm_atomic_helper_update_legacy_modeset_state(dev, &state->base);
intel_set_cdclk_pre_plane_update(dev_priv,
- &intel_state->cdclk.actual,
+ &state->cdclk.actual,
&dev_priv->cdclk.actual,
- intel_state->cdclk.pipe);
+ state->cdclk.pipe);
/*
* SKL workaround: bspec recommends we disable the SAGV when we
@@ -13850,31 +13970,37 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
if (!intel_can_enable_sagv(state))
intel_disable_sagv(dev_priv);
- intel_modeset_verify_disabled(dev, state);
+ intel_modeset_verify_disabled(dev_priv, state);
}
/* Complete the events for pipes that have now been disabled */
- for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
+ for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
bool modeset = needs_modeset(new_crtc_state);
/* Complete events for now-disabled pipes here. */
- if (modeset && !new_crtc_state->active && new_crtc_state->event) {
+ if (modeset && !new_crtc_state->base.active && new_crtc_state->base.event) {
spin_lock_irq(&dev->event_lock);
- drm_crtc_send_vblank_event(crtc, new_crtc_state->event);
+ drm_crtc_send_vblank_event(&crtc->base, new_crtc_state->base.event);
spin_unlock_irq(&dev->event_lock);
- new_crtc_state->event = NULL;
+ new_crtc_state->base.event = NULL;
}
}
+ if (state->modeset)
+ intel_encoders_update_prepare(state);
+
/* Now enable the clocks, plane, pipe, and connectors that we set up. */
dev_priv->display.update_crtcs(state);
- if (intel_state->modeset)
+ if (state->modeset) {
+ intel_encoders_update_complete(state);
+
intel_set_cdclk_post_plane_update(dev_priv,
- &intel_state->cdclk.actual,
+ &state->cdclk.actual,
&dev_priv->cdclk.actual,
- intel_state->cdclk.pipe);
+ state->cdclk.pipe);
+ }
/* FIXME: We should call drm_atomic_helper_commit_hw_done() here
* already, but still need the state for the delayed optimization. To
@@ -13885,16 +14011,14 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
* - switch over to the vblank wait helper in the core after that since
* we don't need our special handling any more.
*/
- drm_atomic_helper_wait_for_flip_done(dev, state);
+ drm_atomic_helper_wait_for_flip_done(dev, &state->base);
- for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
- new_intel_crtc_state = to_intel_crtc_state(new_crtc_state);
-
- if (new_crtc_state->active &&
+ for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
+ if (new_crtc_state->base.active &&
!needs_modeset(new_crtc_state) &&
- (new_intel_crtc_state->base.color_mgmt_changed ||
- new_intel_crtc_state->update_pipe))
- intel_color_load_luts(new_intel_crtc_state);
+ (new_crtc_state->base.color_mgmt_changed ||
+ new_crtc_state->update_pipe))
+ intel_color_load_luts(new_crtc_state);
}
/*
@@ -13904,16 +14028,14 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
*
* TODO: Move this (and other cleanup) to an async worker eventually.
*/
- for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
- new_intel_crtc_state = to_intel_crtc_state(new_crtc_state);
-
+ for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
if (dev_priv->display.optimize_watermarks)
- dev_priv->display.optimize_watermarks(intel_state,
- new_intel_crtc_state);
+ dev_priv->display.optimize_watermarks(state,
+ new_crtc_state);
}
- for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
- intel_post_plane_update(to_intel_crtc_state(old_crtc_state));
+ for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
+ intel_post_plane_update(old_crtc_state);
if (put_domains[i])
modeset_put_power_domains(dev_priv, put_domains[i]);
@@ -13921,15 +14043,15 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
intel_modeset_verify_crtc(crtc, state, old_crtc_state, new_crtc_state);
}
- if (intel_state->modeset)
- intel_verify_planes(intel_state);
+ if (state->modeset)
+ intel_verify_planes(state);
- if (intel_state->modeset && intel_can_enable_sagv(state))
+ if (state->modeset && intel_can_enable_sagv(state))
intel_enable_sagv(dev_priv);
- drm_atomic_helper_commit_hw_done(state);
+ drm_atomic_helper_commit_hw_done(&state->base);
- if (intel_state->modeset) {
+ if (state->modeset) {
/* As one of the primary mmio accessors, KMS has a high
* likelihood of triggering bugs in unclaimed access. After we
* finish modesetting, see if an error has been flagged, and if
@@ -13939,7 +14061,7 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore);
intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET, wakeref);
}
- intel_runtime_pm_put(&dev_priv->runtime_pm, intel_state->wakeref);
+ intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
/*
* Defer the cleanup of the old state to a separate worker to not
@@ -13949,14 +14071,14 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
* schedule point (cond_resched()) here anyway to keep latencies
* down.
*/
- INIT_WORK(&state->commit_work, intel_atomic_cleanup_work);
- queue_work(system_highpri_wq, &state->commit_work);
+ INIT_WORK(&state->base.commit_work, intel_atomic_cleanup_work);
+ queue_work(system_highpri_wq, &state->base.commit_work);
}
static void intel_atomic_commit_work(struct work_struct *work)
{
- struct drm_atomic_state *state =
- container_of(work, struct drm_atomic_state, commit_work);
+ struct intel_atomic_state *state =
+ container_of(work, struct intel_atomic_state, base.commit_work);
intel_atomic_commit_tail(state);
}
@@ -13986,42 +14108,31 @@ intel_atomic_commit_ready(struct i915_sw_fence *fence,
return NOTIFY_DONE;
}
-static void intel_atomic_track_fbs(struct drm_atomic_state *state)
+static void intel_atomic_track_fbs(struct intel_atomic_state *state)
{
- struct drm_plane_state *old_plane_state, *new_plane_state;
- struct drm_plane *plane;
+ struct intel_plane_state *old_plane_state, *new_plane_state;
+ struct intel_plane *plane;
int i;
- for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i)
- i915_gem_track_fb(intel_fb_obj(old_plane_state->fb),
- intel_fb_obj(new_plane_state->fb),
- to_intel_plane(plane)->frontbuffer_bit);
+ for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
+ new_plane_state, i)
+ intel_frontbuffer_track(to_intel_frontbuffer(old_plane_state->base.fb),
+ to_intel_frontbuffer(new_plane_state->base.fb),
+ plane->frontbuffer_bit);
}
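intel_frontbuffer_track() now takes dedicated tracking structs instead of GEM objects, resolved through to_intel_frontbuffer(). Either side may be NULL (a plane being enabled has no old fb, one being disabled has no new fb), so the lookup has to be NULL-safe. A sketch of the assumed helper, presuming struct intel_framebuffer gained a frontbuffer pointer as the intel_framebuffer_init() hunk below suggests:

/* Sketch (assumed): NULL-safe fb -> frontbuffer lookup. */
static inline struct intel_frontbuffer *
to_intel_frontbuffer(struct drm_framebuffer *fb)
{
	return fb ? to_intel_framebuffer(fb)->frontbuffer : NULL;
}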
-/**
- * intel_atomic_commit - commit validated state object
- * @dev: DRM device
- * @state: the top-level driver state object
- * @nonblock: nonblocking commit
- *
- * This function commits a top-level state object that has been validated
- * with drm_atomic_helper_check().
- *
- * RETURNS
- * Zero for success or -errno.
- */
static int intel_atomic_commit(struct drm_device *dev,
- struct drm_atomic_state *state,
+ struct drm_atomic_state *_state,
bool nonblock)
{
- struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
+ struct intel_atomic_state *state = to_intel_atomic_state(_state);
struct drm_i915_private *dev_priv = to_i915(dev);
int ret = 0;
- intel_state->wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
+ state->wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
- drm_atomic_state_get(state);
- i915_sw_fence_init(&intel_state->commit_ready,
+ drm_atomic_state_get(&state->base);
+ i915_sw_fence_init(&state->commit_ready,
intel_atomic_commit_ready);
/*
@@ -14041,63 +14152,61 @@ static int intel_atomic_commit(struct drm_device *dev,
* FIXME doing watermarks and fb cleanup from a vblank worker
* (assuming we had any) would solve these problems.
*/
- if (INTEL_GEN(dev_priv) < 9 && state->legacy_cursor_update) {
+ if (INTEL_GEN(dev_priv) < 9 && state->base.legacy_cursor_update) {
struct intel_crtc_state *new_crtc_state;
struct intel_crtc *crtc;
int i;
- for_each_new_intel_crtc_in_state(intel_state, crtc, new_crtc_state, i)
+ for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
if (new_crtc_state->wm.need_postvbl_update ||
new_crtc_state->update_wm_post)
- state->legacy_cursor_update = false;
+ state->base.legacy_cursor_update = false;
}
- ret = intel_atomic_prepare_commit(dev, state);
+ ret = intel_atomic_prepare_commit(state);
if (ret) {
DRM_DEBUG_ATOMIC("Preparing state failed with %i\n", ret);
- i915_sw_fence_commit(&intel_state->commit_ready);
- intel_runtime_pm_put(&dev_priv->runtime_pm, intel_state->wakeref);
+ i915_sw_fence_commit(&state->commit_ready);
+ intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
return ret;
}
- ret = drm_atomic_helper_setup_commit(state, nonblock);
+ ret = drm_atomic_helper_setup_commit(&state->base, nonblock);
if (!ret)
- ret = drm_atomic_helper_swap_state(state, true);
+ ret = drm_atomic_helper_swap_state(&state->base, true);
if (ret) {
- i915_sw_fence_commit(&intel_state->commit_ready);
+ i915_sw_fence_commit(&state->commit_ready);
- drm_atomic_helper_cleanup_planes(dev, state);
- intel_runtime_pm_put(&dev_priv->runtime_pm, intel_state->wakeref);
+ drm_atomic_helper_cleanup_planes(dev, &state->base);
+ intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
return ret;
}
dev_priv->wm.distrust_bios_wm = false;
intel_shared_dpll_swap_state(state);
intel_atomic_track_fbs(state);
- if (intel_state->modeset) {
- memcpy(dev_priv->min_cdclk, intel_state->min_cdclk,
- sizeof(intel_state->min_cdclk));
- memcpy(dev_priv->min_voltage_level,
- intel_state->min_voltage_level,
- sizeof(intel_state->min_voltage_level));
- dev_priv->active_crtcs = intel_state->active_crtcs;
- dev_priv->cdclk.force_min_cdclk =
- intel_state->cdclk.force_min_cdclk;
+ if (state->modeset) {
+ memcpy(dev_priv->min_cdclk, state->min_cdclk,
+ sizeof(state->min_cdclk));
+ memcpy(dev_priv->min_voltage_level, state->min_voltage_level,
+ sizeof(state->min_voltage_level));
+ dev_priv->active_crtcs = state->active_crtcs;
+ dev_priv->cdclk.force_min_cdclk = state->cdclk.force_min_cdclk;
- intel_cdclk_swap_state(intel_state);
+ intel_cdclk_swap_state(state);
}
- drm_atomic_state_get(state);
- INIT_WORK(&state->commit_work, intel_atomic_commit_work);
+ drm_atomic_state_get(&state->base);
+ INIT_WORK(&state->base.commit_work, intel_atomic_commit_work);
- i915_sw_fence_commit(&intel_state->commit_ready);
- if (nonblock && intel_state->modeset) {
- queue_work(dev_priv->modeset_wq, &state->commit_work);
+ i915_sw_fence_commit(&state->commit_ready);
+ if (nonblock && state->modeset) {
+ queue_work(dev_priv->modeset_wq, &state->base.commit_work);
} else if (nonblock) {
- queue_work(system_unbound_wq, &state->commit_work);
+ queue_work(system_unbound_wq, &state->base.commit_work);
} else {
- if (intel_state->modeset)
+ if (state->modeset)
flush_workqueue(dev_priv->modeset_wq);
intel_atomic_commit_tail(state);
}
@@ -14105,18 +14214,6 @@ static int intel_atomic_commit(struct drm_device *dev,
return 0;
}
-static const struct drm_crtc_funcs intel_crtc_funcs = {
- .gamma_set = drm_atomic_helper_legacy_gamma_set,
- .set_config = drm_atomic_helper_set_config,
- .destroy = intel_crtc_destroy,
- .page_flip = drm_atomic_helper_page_flip,
- .atomic_duplicate_state = intel_crtc_duplicate_state,
- .atomic_destroy_state = intel_crtc_destroy_state,
- .set_crc_source = intel_crtc_set_crc_source,
- .verify_crc_source = intel_crtc_verify_crc_source,
- .get_crc_sources = intel_crtc_get_crc_sources,
-};
-
struct wait_rps_boost {
struct wait_queue_entry wait;
@@ -14250,9 +14347,9 @@ intel_prepare_plane_fb(struct drm_plane *plane,
int ret;
if (old_obj) {
- struct drm_crtc_state *crtc_state =
- drm_atomic_get_new_crtc_state(new_state->state,
- plane->state->crtc);
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(intel_state,
+ to_intel_crtc(plane->state->crtc));
/* Big Hammer, we also need to ensure that any pending
* MI_WAIT_FOR_EVENT inside a user batch buffer on the
@@ -14305,7 +14402,7 @@ intel_prepare_plane_fb(struct drm_plane *plane,
return ret;
fb_obj_bump_render_priority(obj);
- intel_fb_obj_flush(obj, ORIGIN_DIRTYFB);
+ intel_frontbuffer_flush(obj->frontbuffer, ORIGIN_DIRTYFB);
if (!new_state->fence) { /* implicit fencing */
struct dma_fence *fence;
@@ -14317,7 +14414,7 @@ intel_prepare_plane_fb(struct drm_plane *plane,
if (ret < 0)
return ret;
- fence = reservation_object_get_excl_rcu(obj->base.resv);
+ fence = dma_resv_get_excl_rcu(obj->base.resv);
if (fence) {
add_rps_boost_after_vblank(new_state->crtc, fence);
dma_fence_put(fence);
@@ -14413,7 +14510,7 @@ static void intel_begin_crtc_commit(struct intel_atomic_state *state,
intel_atomic_get_old_crtc_state(state, crtc);
struct intel_crtc_state *new_crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
- bool modeset = needs_modeset(&new_crtc_state->base);
+ bool modeset = needs_modeset(new_crtc_state);
/* Perform vblank evasion around commit operation */
intel_pipe_update_start(new_crtc_state);
@@ -14466,7 +14563,7 @@ static void intel_finish_crtc_commit(struct intel_atomic_state *state,
intel_pipe_update_end(new_crtc_state);
if (new_crtc_state->update_pipe &&
- !needs_modeset(&new_crtc_state->base) &&
+ !needs_modeset(new_crtc_state) &&
old_crtc_state->base.mode.private_flags & I915_MODE_FLAG_INHERITED)
intel_crtc_arm_fifo_underrun(crtc, new_crtc_state);
}
@@ -14568,19 +14665,18 @@ intel_legacy_cursor_update(struct drm_plane *plane,
struct drm_modeset_acquire_ctx *ctx)
{
struct drm_i915_private *dev_priv = to_i915(crtc->dev);
- int ret;
struct drm_plane_state *old_plane_state, *new_plane_state;
struct intel_plane *intel_plane = to_intel_plane(plane);
- struct drm_framebuffer *old_fb;
struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->state);
struct intel_crtc_state *new_crtc_state;
+ int ret;
/*
* When the crtc is inactive or a modeset is pending,
* wait for it to complete in the slowpath.
*/
- if (!crtc_state->base.active || needs_modeset(&crtc_state->base) ||
+ if (!crtc_state->base.active || needs_modeset(crtc_state) ||
crtc_state->update_pipe)
goto slow;
@@ -14642,11 +14738,10 @@ intel_legacy_cursor_update(struct drm_plane *plane,
if (ret)
goto out_unlock;
- intel_fb_obj_flush(intel_fb_obj(fb), ORIGIN_FLIP);
-
- old_fb = old_plane_state->fb;
- i915_gem_track_fb(intel_fb_obj(old_fb), intel_fb_obj(fb),
- intel_plane->frontbuffer_bit);
+ intel_frontbuffer_flush(to_intel_frontbuffer(fb), ORIGIN_FLIP);
+ intel_frontbuffer_track(to_intel_frontbuffer(old_plane_state->fb),
+ to_intel_frontbuffer(fb),
+ intel_plane->frontbuffer_bit);
/* Swap plane state */
plane->state = new_plane_state;
@@ -14910,8 +15005,76 @@ static void intel_crtc_init_scalers(struct intel_crtc *crtc,
scaler_state->scaler_id = -1;
}
+#define INTEL_CRTC_FUNCS \
+ .gamma_set = drm_atomic_helper_legacy_gamma_set, \
+ .set_config = drm_atomic_helper_set_config, \
+ .destroy = intel_crtc_destroy, \
+ .page_flip = drm_atomic_helper_page_flip, \
+ .atomic_duplicate_state = intel_crtc_duplicate_state, \
+ .atomic_destroy_state = intel_crtc_destroy_state, \
+ .set_crc_source = intel_crtc_set_crc_source, \
+ .verify_crc_source = intel_crtc_verify_crc_source, \
+ .get_crc_sources = intel_crtc_get_crc_sources
+
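Note that INTEL_CRTC_FUNCS deliberately omits a trailing comma, so each variant writes "INTEL_CRTC_FUNCS," and then appends only its own vblank hooks; the shared ops stay in one place with no runtime indirection. A sketch of what one variant compiles down to once the macro is expanded (common ops elided):

/* Sketch: bdw_crtc_funcs after macro expansion. */
static const struct drm_crtc_funcs bdw_crtc_funcs_expanded = {
	.gamma_set = drm_atomic_helper_legacy_gamma_set,
	/* ... remaining INTEL_CRTC_FUNCS members ... */
	.get_vblank_counter = g4x_get_vblank_counter,
	.enable_vblank = bdw_enable_vblank,
	.disable_vblank = bdw_disable_vblank,
};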
+static const struct drm_crtc_funcs bdw_crtc_funcs = {
+ INTEL_CRTC_FUNCS,
+
+ .get_vblank_counter = g4x_get_vblank_counter,
+ .enable_vblank = bdw_enable_vblank,
+ .disable_vblank = bdw_disable_vblank,
+};
+
+static const struct drm_crtc_funcs ilk_crtc_funcs = {
+ INTEL_CRTC_FUNCS,
+
+ .get_vblank_counter = g4x_get_vblank_counter,
+ .enable_vblank = ilk_enable_vblank,
+ .disable_vblank = ilk_disable_vblank,
+};
+
+static const struct drm_crtc_funcs g4x_crtc_funcs = {
+ INTEL_CRTC_FUNCS,
+
+ .get_vblank_counter = g4x_get_vblank_counter,
+ .enable_vblank = i965_enable_vblank,
+ .disable_vblank = i965_disable_vblank,
+};
+
+static const struct drm_crtc_funcs i965_crtc_funcs = {
+ INTEL_CRTC_FUNCS,
+
+ .get_vblank_counter = i915_get_vblank_counter,
+ .enable_vblank = i965_enable_vblank,
+ .disable_vblank = i965_disable_vblank,
+};
+
+static const struct drm_crtc_funcs i945gm_crtc_funcs = {
+ INTEL_CRTC_FUNCS,
+
+ .get_vblank_counter = i915_get_vblank_counter,
+ .enable_vblank = i945gm_enable_vblank,
+ .disable_vblank = i945gm_disable_vblank,
+};
+
+static const struct drm_crtc_funcs i915_crtc_funcs = {
+ INTEL_CRTC_FUNCS,
+
+ .get_vblank_counter = i915_get_vblank_counter,
+ .enable_vblank = i8xx_enable_vblank,
+ .disable_vblank = i8xx_disable_vblank,
+};
+
+static const struct drm_crtc_funcs i8xx_crtc_funcs = {
+ INTEL_CRTC_FUNCS,
+
+ /* no hw vblank counter */
+ .enable_vblank = i8xx_enable_vblank,
+ .disable_vblank = i8xx_disable_vblank,
+};
+
static int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe)
{
+ const struct drm_crtc_funcs *funcs;
struct intel_crtc *intel_crtc;
struct intel_crtc_state *crtc_state = NULL;
struct intel_plane *primary = NULL;
@@ -14955,10 +15118,28 @@ static int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe)
}
intel_crtc->plane_ids_mask |= BIT(cursor->id);
+ if (HAS_GMCH(dev_priv)) {
+ if (IS_CHERRYVIEW(dev_priv) ||
+ IS_VALLEYVIEW(dev_priv) || IS_G4X(dev_priv))
+ funcs = &g4x_crtc_funcs;
+ else if (IS_GEN(dev_priv, 4))
+ funcs = &i965_crtc_funcs;
+ else if (IS_I945GM(dev_priv))
+ funcs = &i945gm_crtc_funcs;
+ else if (IS_GEN(dev_priv, 3))
+ funcs = &i915_crtc_funcs;
+ else
+ funcs = &i8xx_crtc_funcs;
+ } else {
+ if (INTEL_GEN(dev_priv) >= 8)
+ funcs = &bdw_crtc_funcs;
+ else
+ funcs = &ilk_crtc_funcs;
+ }
+
ret = drm_crtc_init_with_planes(&dev_priv->drm, &intel_crtc->base,
&primary->base, &cursor->base,
- &intel_crtc_funcs,
- "pipe %c", pipe_name(pipe));
+ funcs, "pipe %c", pipe_name(pipe));
if (ret)
goto fail;
@@ -15114,12 +15295,18 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv)
if (!HAS_DISPLAY(dev_priv))
return;
- if (IS_ELKHARTLAKE(dev_priv)) {
+ if (INTEL_GEN(dev_priv) >= 12) {
+ /* TODO: initialize TC ports as well */
+ intel_ddi_init(dev_priv, PORT_A);
+ intel_ddi_init(dev_priv, PORT_B);
+ icl_dsi_init(dev_priv);
+ } else if (IS_ELKHARTLAKE(dev_priv)) {
intel_ddi_init(dev_priv, PORT_A);
intel_ddi_init(dev_priv, PORT_B);
intel_ddi_init(dev_priv, PORT_C);
+ intel_ddi_init(dev_priv, PORT_D);
icl_dsi_init(dev_priv);
- } else if (INTEL_GEN(dev_priv) >= 11) {
+ } else if (IS_GEN(dev_priv, 11)) {
intel_ddi_init(dev_priv, PORT_A);
intel_ddi_init(dev_priv, PORT_B);
intel_ddi_init(dev_priv, PORT_C);
@@ -15334,15 +15521,9 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv)
static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
{
struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
- struct drm_i915_gem_object *obj = intel_fb_obj(fb);
drm_framebuffer_cleanup(fb);
-
- i915_gem_object_lock(obj);
- WARN_ON(!obj->framebuffer_references--);
- i915_gem_object_unlock(obj);
-
- i915_gem_object_put(obj);
+ intel_frontbuffer_put(intel_fb->frontbuffer);
kfree(intel_fb);
}
@@ -15370,7 +15551,7 @@ static int intel_user_framebuffer_dirty(struct drm_framebuffer *fb,
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
i915_gem_object_flush_if_display(obj);
- intel_fb_obj_flush(obj, ORIGIN_DIRTYFB);
+ intel_frontbuffer_flush(to_intel_frontbuffer(fb), ORIGIN_DIRTYFB);
return 0;
}
@@ -15392,8 +15573,11 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
int ret = -EINVAL;
int i;
+ intel_fb->frontbuffer = intel_frontbuffer_get(obj);
+ if (!intel_fb->frontbuffer)
+ return -ENOMEM;
+
i915_gem_object_lock(obj);
- obj->framebuffer_references++;
tiling = i915_gem_object_get_tiling(obj);
stride = i915_gem_object_get_stride(obj);
i915_gem_object_unlock(obj);
@@ -15510,9 +15694,7 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
return 0;
err:
- i915_gem_object_lock(obj);
- obj->framebuffer_references--;
- i915_gem_object_unlock(obj);
+ intel_frontbuffer_put(intel_fb->frontbuffer);
return ret;
}
@@ -15530,8 +15712,7 @@ intel_user_framebuffer_create(struct drm_device *dev,
return ERR_PTR(-ENOENT);
fb = intel_framebuffer_create(obj, &mode_cmd);
- if (IS_ERR(fb))
- i915_gem_object_put(obj);
+ i915_gem_object_put(obj);
return fb;
}
@@ -15775,8 +15956,8 @@ static void sanitize_watermarks(struct drm_device *dev)
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_atomic_state *state;
struct intel_atomic_state *intel_state;
- struct drm_crtc *crtc;
- struct drm_crtc_state *cstate;
+ struct intel_crtc *crtc;
+ struct intel_crtc_state *crtc_state;
struct drm_modeset_acquire_ctx ctx;
int ret;
int i;
@@ -15831,13 +16012,11 @@ retry:
}
/* Write calculated watermark values back */
- for_each_new_crtc_in_state(state, crtc, cstate, i) {
- struct intel_crtc_state *cs = to_intel_crtc_state(cstate);
+ for_each_new_intel_crtc_in_state(intel_state, crtc, crtc_state, i) {
+ crtc_state->wm.need_postvbl_update = true;
+ dev_priv->display.optimize_watermarks(intel_state, crtc_state);
- cs->wm.need_postvbl_update = true;
- dev_priv->display.optimize_watermarks(intel_state, cs);
-
- to_intel_crtc_state(crtc->state)->wm = cs->wm;
+ to_intel_crtc_state(crtc->base.state)->wm = crtc_state->wm;
}
put_state:
@@ -15922,7 +16101,6 @@ out:
int intel_modeset_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
- struct i915_ggtt *ggtt = &dev_priv->ggtt;
enum pipe pipe;
struct intel_crtc *crtc;
int ret;
@@ -16002,8 +16180,6 @@ int intel_modeset_init(struct drm_device *dev)
dev->mode_config.cursor_height = 256;
}
- dev->mode_config.fb_base = ggtt->gmadr.start;
-
DRM_DEBUG_KMS("%d display pipe%s available.\n",
INTEL_INFO(dev_priv)->num_pipes,
INTEL_INFO(dev_priv)->num_pipes > 1 ? "s" : "");
@@ -16495,6 +16671,13 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
pll->on = pll->info->funcs->get_hw_state(dev_priv, pll,
&pll->state.hw_state);
+
+ if (IS_ELKHARTLAKE(dev_priv) && pll->on &&
+ pll->info->id == DPLL_ID_EHL_DPLL4) {
+ pll->wakeref = intel_display_power_get(dev_priv,
+ POWER_DOMAIN_DPLL_DC_OFF);
+ }
+
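Taking the DC-off reference during readout keeps the power domain refcount consistent when the BIOS left DPLL4 running; presumably the matching release happens wherever the PLL is later disabled. A sketch of the assumed release side:

/* Sketch (assumed): drop the readout-acquired reference when
 * EHL DPLL4 is turned off, mirroring the get above.
 */
if (IS_ELKHARTLAKE(dev_priv) && pll->info->id == DPLL_ID_EHL_DPLL4)
	intel_display_power_put(dev_priv, POWER_DOMAIN_DPLL_DC_OFF,
				pll->wakeref);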
pll->state.crtc_mask = 0;
for_each_intel_crtc(dev, crtc) {
struct intel_crtc_state *crtc_state =
@@ -16744,6 +16927,17 @@ intel_modeset_setup_hw_state(struct drm_device *dev,
intel_modeset_readout_hw_state(dev);
/* HW state is read out, now we need to sanitize this mess. */
+
+ /* Sanitize the TypeC port mode upfront, encoders depend on this */
+ for_each_intel_encoder(dev, encoder) {
+ enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
+
+ /* We need to sanitize only the MST primary port. */
+ if (encoder->type != INTEL_OUTPUT_DP_MST &&
+ intel_phy_is_tc(dev_priv, phy))
+ intel_tc_port_sanitize(enc_to_dig_port(&encoder->base));
+ }
+
get_encoder_power_domains(dev_priv);
if (HAS_PCH_IBX(dev_priv))
@@ -16804,7 +16998,7 @@ intel_modeset_setup_hw_state(struct drm_device *dev,
u64 put_domains;
crtc_state = to_intel_crtc_state(crtc->base.state);
- put_domains = modeset_get_crtc_power_domains(&crtc->base, crtc_state);
+ put_domains = modeset_get_crtc_power_domains(crtc_state);
if (WARN_ON(put_domains))
modeset_put_power_domains(dev_priv, put_domains);
}
@@ -16866,7 +17060,7 @@ static void intel_hpd_poll_fini(struct drm_device *dev)
drm_connector_list_iter_end(&conn_iter);
}
-void intel_modeset_cleanup(struct drm_device *dev)
+void intel_modeset_driver_remove(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
@@ -16982,7 +17176,7 @@ struct intel_display_error_state {
u32 vtotal;
u32 vblank;
u32 vsync;
- } transcoder[4];
+ } transcoder[5];
};
struct intel_display_error_state *
@@ -16993,6 +17187,7 @@ intel_display_capture_error_state(struct drm_i915_private *dev_priv)
TRANSCODER_A,
TRANSCODER_B,
TRANSCODER_C,
+ TRANSCODER_D,
TRANSCODER_EDP,
};
int i;
diff --git a/drivers/gpu/drm/i915/display/intel_display.h b/drivers/gpu/drm/i915/display/intel_display.h
index ee6b8194a459..e57e6969051d 100644
--- a/drivers/gpu/drm/i915/display/intel_display.h
+++ b/drivers/gpu/drm/i915/display/intel_display.h
@@ -28,8 +28,30 @@
#include <drm/drm_util.h>
#include <drm/i915_drm.h>
+enum link_m_n_set;
+struct dpll;
+struct drm_connector;
+struct drm_device;
+struct drm_encoder;
+struct drm_file;
+struct drm_framebuffer;
+struct drm_i915_error_state_buf;
+struct drm_i915_gem_object;
struct drm_i915_private;
+struct drm_modeset_acquire_ctx;
+struct drm_plane;
+struct drm_plane_state;
+struct i915_ggtt_view;
+struct intel_crtc;
+struct intel_crtc_state;
+struct intel_digital_port;
+struct intel_dp;
+struct intel_encoder;
+struct intel_load_detect_pipe;
+struct intel_plane;
struct intel_plane_state;
+struct intel_remapped_info;
+struct intel_rotation_info;
enum i915_gpio {
GPIOA,
@@ -45,6 +67,8 @@ enum i915_gpio {
GPIOK,
GPIOL,
GPIOM,
+ GPION,
+ GPIOO,
};
/*
@@ -58,6 +82,7 @@ enum pipe {
PIPE_A = 0,
PIPE_B,
PIPE_C,
+ PIPE_D,
_PIPE_EDP,
I915_MAX_PIPES = _PIPE_EDP
@@ -75,6 +100,7 @@ enum transcoder {
TRANSCODER_A = PIPE_A,
TRANSCODER_B = PIPE_B,
TRANSCODER_C = PIPE_C,
+ TRANSCODER_D = PIPE_D,
/*
* The following transcoders can map to any pipe, their enum value
@@ -98,6 +124,8 @@ static inline const char *transcoder_name(enum transcoder transcoder)
return "B";
case TRANSCODER_C:
return "C";
+ case TRANSCODER_D:
+ return "D";
case TRANSCODER_EDP:
return "EDP";
case TRANSCODER_DSI_A:
@@ -173,6 +201,12 @@ static inline const char *port_identifier(enum port port)
return "Port E";
case PORT_F:
return "Port F";
+ case PORT_G:
+ return "Port G";
+ case PORT_H:
+ return "Port H";
+ case PORT_I:
+ return "Port I";
default:
return "<invalid>";
}
@@ -185,14 +219,15 @@ enum tc_port {
PORT_TC2,
PORT_TC3,
PORT_TC4,
+ PORT_TC5,
+ PORT_TC6,
I915_MAX_TC_PORTS
};
-enum tc_port_type {
- TC_PORT_UNKNOWN = 0,
- TC_PORT_TYPEC,
- TC_PORT_TBT,
+enum tc_port_mode {
+ TC_PORT_TBT_ALT,
+ TC_PORT_DP_ALT,
TC_PORT_LEGACY,
};
@@ -229,6 +264,30 @@ struct intel_link_m_n {
u32 link_n;
};
+enum phy {
+ PHY_NONE = -1,
+
+ PHY_A = 0,
+ PHY_B,
+ PHY_C,
+ PHY_D,
+ PHY_E,
+ PHY_F,
+ PHY_G,
+ PHY_H,
+ PHY_I,
+
+ I915_MAX_PHYS
+};
+
+#define phy_name(a) ((a) + 'A')
+
+enum phy_fia {
+ FIA1,
+ FIA2,
+ FIA3,
+};
+
#define for_each_pipe(__dev_priv, __p) \
for ((__p) = 0; (__p) < INTEL_INFO(__dev_priv)->num_pipes; (__p)++)
@@ -254,6 +313,10 @@ struct intel_link_m_n {
for ((__port) = PORT_A; (__port) < I915_MAX_PORTS; (__port)++) \
for_each_if((__ports_mask) & BIT(__port))
+#define for_each_phy_masked(__phy, __phys_mask) \
+ for ((__phy) = PHY_A; (__phy) < I915_MAX_PHYS; (__phy)++) \
+ for_each_if((__phys_mask) & BIT(__phy))
+
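
[Aside, not part of the patch: the new for_each_phy_masked() iterator follows the driver's usual mask-walk idiom — walk every PHY index, filter on the bitmask. A minimal self-contained C sketch of the same pattern; the BIT()/for_each_if() stand-ins below are illustrative re-definitions, not the kernel headers.]

#include <stdio.h>

#define BIT(n) (1u << (n))
#define for_each_if(condition) if (!(condition)) {} else

enum phy { PHY_A, PHY_B, PHY_C, MAX_PHYS };

#define for_each_phy_masked(__phy, __phys_mask) \
	for ((__phy) = PHY_A; (__phy) < MAX_PHYS; (__phy)++) \
		for_each_if((__phys_mask) & BIT(__phy))

int main(void)
{
	unsigned int mask = BIT(PHY_A) | BIT(PHY_C);
	enum phy phy;

	/* Visits only PHY A and PHY C; PHY B's bit is clear. */
	for_each_phy_masked(phy, mask)
		printf("PHY %c\n", 'A' + phy);
	return 0;
}
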
#define for_each_crtc(dev, crtc) \
list_for_each_entry(crtc, &(dev)->mode_config.crtc_list, head)
@@ -357,5 +420,173 @@ void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv);
u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv,
u32 pixel_format, u64 modifier);
bool intel_plane_can_remap(const struct intel_plane_state *plane_state);
+enum phy intel_port_to_phy(struct drm_i915_private *i915, enum port port);
+
+void intel_plane_destroy(struct drm_plane *plane);
+void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe);
+void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe);
+enum pipe intel_crtc_pch_transcoder(struct intel_crtc *crtc);
+int vlv_get_hpll_vco(struct drm_i915_private *dev_priv);
+int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
+ const char *name, u32 reg, int ref_freq);
+int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv,
+ const char *name, u32 reg);
+void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv);
+void lpt_disable_iclkip(struct drm_i915_private *dev_priv);
+void intel_init_display_hooks(struct drm_i915_private *dev_priv);
+unsigned int intel_fb_xy_to_linear(int x, int y,
+ const struct intel_plane_state *state,
+ int plane);
+unsigned int intel_fb_align_height(const struct drm_framebuffer *fb,
+ int color_plane, unsigned int height);
+void intel_add_fb_offsets(int *x, int *y,
+ const struct intel_plane_state *state, int plane);
+unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info);
+unsigned int intel_remapped_info_size(const struct intel_remapped_info *rem_info);
+bool intel_has_pending_fb_unpin(struct drm_i915_private *dev_priv);
+int intel_display_suspend(struct drm_device *dev);
+void intel_pps_unlock_regs_wa(struct drm_i915_private *dev_priv);
+void intel_encoder_destroy(struct drm_encoder *encoder);
+struct drm_display_mode *
+intel_encoder_current_mode(struct intel_encoder *encoder);
+bool intel_phy_is_combo(struct drm_i915_private *dev_priv, enum phy phy);
+bool intel_phy_is_tc(struct drm_i915_private *dev_priv, enum phy phy);
+enum tc_port intel_port_to_tc(struct drm_i915_private *dev_priv,
+ enum port port);
+int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
+ enum pipe pipe);
+u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc);
+
+int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp);
+void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
+ struct intel_digital_port *dport,
+ unsigned int expected_mask);
+int intel_get_load_detect_pipe(struct drm_connector *connector,
+ const struct drm_display_mode *mode,
+ struct intel_load_detect_pipe *old,
+ struct drm_modeset_acquire_ctx *ctx);
+void intel_release_load_detect_pipe(struct drm_connector *connector,
+ struct intel_load_detect_pipe *old,
+ struct drm_modeset_acquire_ctx *ctx);
+struct i915_vma *
+intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
+ const struct i915_ggtt_view *view,
+ bool uses_fence,
+ unsigned long *out_flags);
+void intel_unpin_fb_vma(struct i915_vma *vma, unsigned long flags);
+struct drm_framebuffer *
+intel_framebuffer_create(struct drm_i915_gem_object *obj,
+ struct drm_mode_fb_cmd2 *mode_cmd);
+int intel_prepare_plane_fb(struct drm_plane *plane,
+ struct drm_plane_state *new_state);
+void intel_cleanup_plane_fb(struct drm_plane *plane,
+ struct drm_plane_state *old_state);
+
+void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
+ enum pipe pipe);
+
+int vlv_force_pll_on(struct drm_i915_private *dev_priv, enum pipe pipe,
+ const struct dpll *dpll);
+void vlv_force_pll_off(struct drm_i915_private *dev_priv, enum pipe pipe);
+int lpt_get_iclkip(struct drm_i915_private *dev_priv);
+bool intel_fuzzy_clock_check(int clock1, int clock2);
+
+void intel_prepare_reset(struct drm_i915_private *dev_priv);
+void intel_finish_reset(struct drm_i915_private *dev_priv);
+void intel_dp_get_m_n(struct intel_crtc *crtc,
+ struct intel_crtc_state *pipe_config);
+void intel_dp_set_m_n(const struct intel_crtc_state *crtc_state,
+ enum link_m_n_set m_n);
+void intel_dp_ycbcr_420_enable(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state);
+int intel_dotclock_calculate(int link_freq, const struct intel_link_m_n *m_n);
+bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state,
+ struct dpll *best_clock);
+int chv_calc_dpll_params(int refclk, struct dpll *pll_clock);
+
+bool intel_crtc_active(struct intel_crtc *crtc);
+bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state);
+void hsw_enable_ips(const struct intel_crtc_state *crtc_state);
+void hsw_disable_ips(const struct intel_crtc_state *crtc_state);
+enum intel_display_power_domain intel_port_to_power_domain(enum port port);
+enum intel_display_power_domain
+intel_aux_power_domain(struct intel_digital_port *dig_port);
+void intel_mode_from_pipe_config(struct drm_display_mode *mode,
+ struct intel_crtc_state *pipe_config);
+void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc,
+ struct intel_crtc_state *crtc_state);
+
+u16 skl_scaler_calc_phase(int sub, int scale, bool chroma_center);
+int skl_update_scaler_crtc(struct intel_crtc_state *crtc_state);
+int skl_max_scale(const struct intel_crtc_state *crtc_state,
+ u32 pixel_format);
+u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state);
+u32 glk_plane_color_ctl_crtc(const struct intel_crtc_state *crtc_state);
+u32 skl_plane_ctl(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state);
+u32 skl_plane_ctl_crtc(const struct intel_crtc_state *crtc_state);
+u32 skl_plane_stride(const struct intel_plane_state *plane_state,
+ int plane);
+int skl_check_plane_surface(struct intel_plane_state *plane_state);
+int i9xx_check_plane_surface(struct intel_plane_state *plane_state);
+int skl_format_to_fourcc(int format, bool rgb_order, bool alpha);
+unsigned int i9xx_plane_max_stride(struct intel_plane *plane,
+ u32 pixel_format, u64 modifier,
+ unsigned int rotation);
+int bdw_get_pipemisc_bpp(struct intel_crtc *crtc);
+
+struct intel_display_error_state *
+intel_display_capture_error_state(struct drm_i915_private *dev_priv);
+void intel_display_print_error_state(struct drm_i915_error_state_buf *e,
+ struct intel_display_error_state *error);
+
+/* modesetting */
+void intel_modeset_init_hw(struct drm_device *dev);
+int intel_modeset_init(struct drm_device *dev);
+void intel_modeset_driver_remove(struct drm_device *dev);
+int intel_modeset_vga_set_state(struct drm_i915_private *dev_priv, bool state);
+void intel_display_resume(struct drm_device *dev);
+void i915_redisable_vga(struct drm_i915_private *dev_priv);
+void i915_redisable_vga_power_on(struct drm_i915_private *dev_priv);
+void intel_init_pch_refclk(struct drm_i915_private *dev_priv);
+
+/* modesetting asserts */
+void assert_panel_unlocked(struct drm_i915_private *dev_priv,
+ enum pipe pipe);
+void assert_pll(struct drm_i915_private *dev_priv,
+ enum pipe pipe, bool state);
+#define assert_pll_enabled(d, p) assert_pll(d, p, true)
+#define assert_pll_disabled(d, p) assert_pll(d, p, false)
+void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state);
+#define assert_dsi_pll_enabled(d) assert_dsi_pll(d, true)
+#define assert_dsi_pll_disabled(d) assert_dsi_pll(d, false)
+void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
+ enum pipe pipe, bool state);
+#define assert_fdi_rx_pll_enabled(d, p) assert_fdi_rx_pll(d, p, true)
+#define assert_fdi_rx_pll_disabled(d, p) assert_fdi_rx_pll(d, p, false)
+void assert_pipe(struct drm_i915_private *dev_priv, enum pipe pipe, bool state);
+#define assert_pipe_enabled(d, p) assert_pipe(d, p, true)
+#define assert_pipe_disabled(d, p) assert_pipe(d, p, false)
+
+/* Use I915_STATE_WARN(x) and I915_STATE_WARN_ON() (rather than WARN() and
+ * WARN_ON()) for hw state sanity checks to check for unexpected conditions
+ * which may not necessarily be a user visible problem. This will either
+ * WARN() or DRM_ERROR() depending on the verbose_state_checks moduleparam, to
+ * enable distros and users to tailor their preferred amount of i915 abrt
+ * spam.
+ */
+#define I915_STATE_WARN(condition, format...) ({ \
+ int __ret_warn_on = !!(condition); \
+ if (unlikely(__ret_warn_on)) \
+ if (!WARN(i915_modparams.verbose_state_checks, format)) \
+ DRM_ERROR(format); \
+ unlikely(__ret_warn_on); \
+})
+
+#define I915_STATE_WARN_ON(x) \
+ I915_STATE_WARN((x), "%s", "WARN_ON(" __stringify(x) ")")
#endif
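
[Aside: I915_STATE_WARN() above is a GNU statement expression — it evaluates the condition once, reports via WARN() when verbose checks are on (DRM_ERROR() otherwise), and still yields the truth value so callers can branch on it. A hedged user-space model of the same shape; fprintf stands in for the kernel reporting and GNU C is assumed for the ({ ... }) form.]

#include <stdbool.h>
#include <stdio.h>

static bool verbose_state_checks = true; /* models the i915 modparam */

#define STATE_WARN(condition, ...) ({ \
	int __ret_warn_on = !!(condition); \
	if (__ret_warn_on) { \
		if (verbose_state_checks) \
			fprintf(stderr, "WARN: " __VA_ARGS__); \
		else \
			fprintf(stderr, "ERROR: " __VA_ARGS__); \
	} \
	__ret_warn_on; \
})

int main(void)
{
	int pipe_enabled = 0;

	/* Reports once and still returns the condition, like WARN(). */
	if (STATE_WARN(!pipe_enabled, "pipe unexpectedly disabled\n"))
		return 1;
	return 0;
}
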
diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c
index 2d1939db108f..12099760d99e 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power.c
+++ b/drivers/gpu/drm/i915/display/intel_display_power.c
@@ -13,17 +13,22 @@
#include "intel_cdclk.h"
#include "intel_combo_phy.h"
#include "intel_csr.h"
+#include "intel_display_power.h"
+#include "intel_display_types.h"
#include "intel_dpio_phy.h"
-#include "intel_drv.h"
#include "intel_hotplug.h"
#include "intel_sideband.h"
+#include "intel_tc.h"
bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
enum i915_power_well_id power_well_id);
const char *
-intel_display_power_domain_str(enum intel_display_power_domain domain)
+intel_display_power_domain_str(struct drm_i915_private *i915,
+ enum intel_display_power_domain domain)
{
+ bool ddi_tc_ports = IS_GEN(i915, 12);
+
switch (domain) {
case POWER_DOMAIN_DISPLAY_CORE:
return "DISPLAY_CORE";
@@ -33,22 +38,28 @@ intel_display_power_domain_str(enum intel_display_power_domain domain)
return "PIPE_B";
case POWER_DOMAIN_PIPE_C:
return "PIPE_C";
+ case POWER_DOMAIN_PIPE_D:
+ return "PIPE_D";
case POWER_DOMAIN_PIPE_A_PANEL_FITTER:
return "PIPE_A_PANEL_FITTER";
case POWER_DOMAIN_PIPE_B_PANEL_FITTER:
return "PIPE_B_PANEL_FITTER";
case POWER_DOMAIN_PIPE_C_PANEL_FITTER:
return "PIPE_C_PANEL_FITTER";
+ case POWER_DOMAIN_PIPE_D_PANEL_FITTER:
+ return "PIPE_D_PANEL_FITTER";
case POWER_DOMAIN_TRANSCODER_A:
return "TRANSCODER_A";
case POWER_DOMAIN_TRANSCODER_B:
return "TRANSCODER_B";
case POWER_DOMAIN_TRANSCODER_C:
return "TRANSCODER_C";
+ case POWER_DOMAIN_TRANSCODER_D:
+ return "TRANSCODER_D";
case POWER_DOMAIN_TRANSCODER_EDP:
return "TRANSCODER_EDP";
- case POWER_DOMAIN_TRANSCODER_EDP_VDSC:
- return "TRANSCODER_EDP_VDSC";
+ case POWER_DOMAIN_TRANSCODER_VDSC_PW2:
+ return "TRANSCODER_VDSC_PW2";
case POWER_DOMAIN_TRANSCODER_DSI_A:
return "TRANSCODER_DSI_A";
case POWER_DOMAIN_TRANSCODER_DSI_C:
@@ -60,11 +71,23 @@ intel_display_power_domain_str(enum intel_display_power_domain domain)
case POWER_DOMAIN_PORT_DDI_C_LANES:
return "PORT_DDI_C_LANES";
case POWER_DOMAIN_PORT_DDI_D_LANES:
- return "PORT_DDI_D_LANES";
+ BUILD_BUG_ON(POWER_DOMAIN_PORT_DDI_D_LANES !=
+ POWER_DOMAIN_PORT_DDI_TC1_LANES);
+ return ddi_tc_ports ? "PORT_DDI_TC1_LANES" : "PORT_DDI_D_LANES";
case POWER_DOMAIN_PORT_DDI_E_LANES:
- return "PORT_DDI_E_LANES";
+ BUILD_BUG_ON(POWER_DOMAIN_PORT_DDI_E_LANES !=
+ POWER_DOMAIN_PORT_DDI_TC2_LANES);
+ return ddi_tc_ports ? "PORT_DDI_TC2_LANES" : "PORT_DDI_E_LANES";
case POWER_DOMAIN_PORT_DDI_F_LANES:
- return "PORT_DDI_F_LANES";
+ BUILD_BUG_ON(POWER_DOMAIN_PORT_DDI_F_LANES !=
+ POWER_DOMAIN_PORT_DDI_TC3_LANES);
+ return ddi_tc_ports ? "PORT_DDI_TC3_LANES" : "PORT_DDI_F_LANES";
+ case POWER_DOMAIN_PORT_DDI_TC4_LANES:
+ return "PORT_DDI_TC4_LANES";
+ case POWER_DOMAIN_PORT_DDI_TC5_LANES:
+ return "PORT_DDI_TC5_LANES";
+ case POWER_DOMAIN_PORT_DDI_TC6_LANES:
+ return "PORT_DDI_TC6_LANES";
case POWER_DOMAIN_PORT_DDI_A_IO:
return "PORT_DDI_A_IO";
case POWER_DOMAIN_PORT_DDI_B_IO:
@@ -72,11 +95,23 @@ intel_display_power_domain_str(enum intel_display_power_domain domain)
case POWER_DOMAIN_PORT_DDI_C_IO:
return "PORT_DDI_C_IO";
case POWER_DOMAIN_PORT_DDI_D_IO:
- return "PORT_DDI_D_IO";
+ BUILD_BUG_ON(POWER_DOMAIN_PORT_DDI_D_IO !=
+ POWER_DOMAIN_PORT_DDI_TC1_IO);
+ return ddi_tc_ports ? "PORT_DDI_TC1_IO" : "PORT_DDI_D_IO";
case POWER_DOMAIN_PORT_DDI_E_IO:
- return "PORT_DDI_E_IO";
+ BUILD_BUG_ON(POWER_DOMAIN_PORT_DDI_E_IO !=
+ POWER_DOMAIN_PORT_DDI_TC2_IO);
+ return ddi_tc_ports ? "PORT_DDI_TC2_IO" : "PORT_DDI_E_IO";
case POWER_DOMAIN_PORT_DDI_F_IO:
- return "PORT_DDI_F_IO";
+ BUILD_BUG_ON(POWER_DOMAIN_PORT_DDI_F_IO !=
+ POWER_DOMAIN_PORT_DDI_TC3_IO);
+ return ddi_tc_ports ? "PORT_DDI_TC3_IO" : "PORT_DDI_F_IO";
+ case POWER_DOMAIN_PORT_DDI_TC4_IO:
+ return "PORT_DDI_TC4_IO";
+ case POWER_DOMAIN_PORT_DDI_TC5_IO:
+ return "PORT_DDI_TC5_IO";
+ case POWER_DOMAIN_PORT_DDI_TC6_IO:
+ return "PORT_DDI_TC6_IO";
case POWER_DOMAIN_PORT_DSI:
return "PORT_DSI";
case POWER_DOMAIN_PORT_CRT:
@@ -94,11 +129,20 @@ intel_display_power_domain_str(enum intel_display_power_domain domain)
case POWER_DOMAIN_AUX_C:
return "AUX_C";
case POWER_DOMAIN_AUX_D:
- return "AUX_D";
+ BUILD_BUG_ON(POWER_DOMAIN_AUX_D != POWER_DOMAIN_AUX_TC1);
+ return ddi_tc_ports ? "AUX_TC1" : "AUX_D";
case POWER_DOMAIN_AUX_E:
- return "AUX_E";
+ BUILD_BUG_ON(POWER_DOMAIN_AUX_E != POWER_DOMAIN_AUX_TC2);
+ return ddi_tc_ports ? "AUX_TC2" : "AUX_E";
case POWER_DOMAIN_AUX_F:
- return "AUX_F";
+ BUILD_BUG_ON(POWER_DOMAIN_AUX_F != POWER_DOMAIN_AUX_TC3);
+ return ddi_tc_ports ? "AUX_TC3" : "AUX_F";
+ case POWER_DOMAIN_AUX_TC4:
+ return "AUX_TC4";
+ case POWER_DOMAIN_AUX_TC5:
+ return "AUX_TC5";
+ case POWER_DOMAIN_AUX_TC6:
+ return "AUX_TC6";
case POWER_DOMAIN_AUX_IO_A:
return "AUX_IO_A";
case POWER_DOMAIN_AUX_TBT1:
@@ -109,6 +153,10 @@ intel_display_power_domain_str(enum intel_display_power_domain domain)
return "AUX_TBT3";
case POWER_DOMAIN_AUX_TBT4:
return "AUX_TBT4";
+ case POWER_DOMAIN_AUX_TBT5:
+ return "AUX_TBT5";
+ case POWER_DOMAIN_AUX_TBT6:
+ return "AUX_TBT6";
case POWER_DOMAIN_GMBUS:
return "GMBUS";
case POWER_DOMAIN_INIT:
@@ -117,6 +165,8 @@ intel_display_power_domain_str(enum intel_display_power_domain domain)
return "MODESET";
case POWER_DOMAIN_GT_IRQ:
return "GT_IRQ";
+ case POWER_DOMAIN_DPLL_DC_OFF:
+ return "DPLL_DC_OFF";
default:
MISSING_CASE(domain);
return "?";
@@ -269,11 +319,14 @@ static void hsw_wait_for_power_well_enable(struct drm_i915_private *dev_priv,
int pw_idx = power_well->desc->hsw.idx;
/* Timeout for PW1:10 us, AUX:not specified, other PWs:20 us. */
- WARN_ON(intel_wait_for_register(&dev_priv->uncore,
- regs->driver,
- HSW_PWR_WELL_CTL_STATE(pw_idx),
- HSW_PWR_WELL_CTL_STATE(pw_idx),
- 1));
+ if (intel_de_wait_for_set(dev_priv, regs->driver,
+ HSW_PWR_WELL_CTL_STATE(pw_idx), 1)) {
+ DRM_DEBUG_KMS("%s power well enable timeout\n",
+ power_well->desc->name);
+
+ /* An AUX timeout is expected if the TBT DP tunnel is down. */
+ WARN_ON(!power_well->desc->hsw.is_tc_tbt);
+ }
}
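
[Aside: the rework above turns a blanket WARN_ON() into a poll that only escalates unexpected timeouts — for TypeC/TBT AUX wells a timeout is normal when the Thunderbolt DP tunnel is down. A small stand-alone sketch of the wait-then-triage pattern; the register callback and retry count are invented for illustration.]

#include <stdbool.h>
#include <stdio.h>

static bool wait_for_set(unsigned int (*read_reg)(void),
			 unsigned int mask, int tries)
{
	while (tries--)
		if (read_reg() & mask)
			return true;
	return false; /* timed out */
}

static unsigned int dead_reg(void) { return 0; } /* never sets the bit */

int main(void)
{
	bool is_tc_tbt = true; /* a timeout is expected for TBT wells */

	if (!wait_for_set(dead_reg, 0x1, 3)) {
		printf("power well enable timeout\n");
		if (!is_tc_tbt)
			printf("WARN: unexpected timeout\n"); /* real bug */
	}
	return 0;
}
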
static u32 hsw_power_well_requesters(struct drm_i915_private *dev_priv,
@@ -324,9 +377,8 @@ static void gen9_wait_for_power_well_fuses(struct drm_i915_private *dev_priv,
enum skl_power_gate pg)
{
/* Timeout 5us for PG#0, for other PGs 1us */
- WARN_ON(intel_wait_for_register(&dev_priv->uncore, SKL_FUSE_STATUS,
- SKL_FUSE_PG_DIST_STATUS(pg),
- SKL_FUSE_PG_DIST_STATUS(pg), 1));
+ WARN_ON(intel_de_wait_for_set(dev_priv, SKL_FUSE_STATUS,
+ SKL_FUSE_PG_DIST_STATUS(pg), 1));
}
static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
@@ -388,7 +440,7 @@ static void hsw_power_well_disable(struct drm_i915_private *dev_priv,
hsw_wait_for_power_well_disable(dev_priv, power_well);
}
-#define ICL_AUX_PW_TO_PORT(pw_idx) ((pw_idx) - ICL_PW_CTL_IDX_AUX_A)
+#define ICL_AUX_PW_TO_PHY(pw_idx) ((pw_idx) - ICL_PW_CTL_IDX_AUX_A)
static void
icl_combo_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
@@ -396,21 +448,29 @@ icl_combo_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
{
const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
int pw_idx = power_well->desc->hsw.idx;
- enum port port = ICL_AUX_PW_TO_PORT(pw_idx);
+ enum phy phy = ICL_AUX_PW_TO_PHY(pw_idx);
u32 val;
+ int wa_idx_max;
val = I915_READ(regs->driver);
I915_WRITE(regs->driver, val | HSW_PWR_WELL_CTL_REQ(pw_idx));
- val = I915_READ(ICL_PORT_CL_DW12(port));
- I915_WRITE(ICL_PORT_CL_DW12(port), val | ICL_LANE_ENABLE_AUX);
+ if (INTEL_GEN(dev_priv) < 12) {
+ val = I915_READ(ICL_PORT_CL_DW12(phy));
+ I915_WRITE(ICL_PORT_CL_DW12(phy), val | ICL_LANE_ENABLE_AUX);
+ }
hsw_wait_for_power_well_enable(dev_priv, power_well);
- /* Display WA #1178: icl */
- if (IS_ICELAKE(dev_priv) &&
- pw_idx >= ICL_PW_CTL_IDX_AUX_A && pw_idx <= ICL_PW_CTL_IDX_AUX_B &&
- !intel_bios_is_port_edp(dev_priv, port)) {
+ /* Display WA #1178: icl, tgl */
+ if (IS_TIGERLAKE(dev_priv))
+ wa_idx_max = ICL_PW_CTL_IDX_AUX_C;
+ else
+ wa_idx_max = ICL_PW_CTL_IDX_AUX_B;
+
+ if (!IS_ELKHARTLAKE(dev_priv) &&
+ pw_idx >= ICL_PW_CTL_IDX_AUX_A && pw_idx <= wa_idx_max &&
+ !intel_bios_is_port_edp(dev_priv, (enum port)phy)) {
val = I915_READ(ICL_AUX_ANAOVRD1(pw_idx));
val |= ICL_AUX_ANAOVRD1_ENABLE | ICL_AUX_ANAOVRD1_LDO_BYPASS;
I915_WRITE(ICL_AUX_ANAOVRD1(pw_idx), val);
@@ -423,11 +483,13 @@ icl_combo_phy_aux_power_well_disable(struct drm_i915_private *dev_priv,
{
const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
int pw_idx = power_well->desc->hsw.idx;
- enum port port = ICL_AUX_PW_TO_PORT(pw_idx);
+ enum phy phy = ICL_AUX_PW_TO_PHY(pw_idx);
u32 val;
- val = I915_READ(ICL_PORT_CL_DW12(port));
- I915_WRITE(ICL_PORT_CL_DW12(port), val & ~ICL_LANE_ENABLE_AUX);
+ if (INTEL_GEN(dev_priv) < 12) {
+ val = I915_READ(ICL_PORT_CL_DW12(phy));
+ I915_WRITE(ICL_PORT_CL_DW12(phy), val & ~ICL_LANE_ENABLE_AUX);
+ }
val = I915_READ(regs->driver);
I915_WRITE(regs->driver, val & ~HSW_PWR_WELL_CTL_REQ(pw_idx));
@@ -441,26 +503,108 @@ icl_combo_phy_aux_power_well_disable(struct drm_i915_private *dev_priv,
#define ICL_TBT_AUX_PW_TO_CH(pw_idx) \
((pw_idx) - ICL_PW_CTL_IDX_AUX_TBT1 + AUX_CH_C)
+static enum aux_ch icl_tc_phy_aux_ch(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ int pw_idx = power_well->desc->hsw.idx;
+
+ return power_well->desc->hsw.is_tc_tbt ? ICL_TBT_AUX_PW_TO_CH(pw_idx) :
+ ICL_AUX_PW_TO_CH(pw_idx);
+}
+
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
+
+static u64 async_put_domains_mask(struct i915_power_domains *power_domains);
+
+static int power_well_async_ref_count(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ int refs = hweight64(power_well->desc->domains &
+ async_put_domains_mask(&dev_priv->power_domains));
+
+ WARN_ON(refs > power_well->count);
+
+ return refs;
+}
+
+static void icl_tc_port_assert_ref_held(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ enum aux_ch aux_ch = icl_tc_phy_aux_ch(dev_priv, power_well);
+ struct intel_digital_port *dig_port = NULL;
+ struct intel_encoder *encoder;
+
+ /* Bypass the check if all references are released asynchronously */
+ if (power_well_async_ref_count(dev_priv, power_well) ==
+ power_well->count)
+ return;
+
+ for_each_intel_encoder(&dev_priv->drm, encoder) {
+ enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
+
+ if (!intel_phy_is_tc(dev_priv, phy))
+ continue;
+
+ /* We'll check the MST primary port */
+ if (encoder->type == INTEL_OUTPUT_DP_MST)
+ continue;
+
+ dig_port = enc_to_dig_port(&encoder->base);
+ if (WARN_ON(!dig_port))
+ continue;
+
+ if (dig_port->aux_ch != aux_ch) {
+ dig_port = NULL;
+ continue;
+ }
+
+ break;
+ }
+
+ if (WARN_ON(!dig_port))
+ return;
+
+ WARN_ON(!intel_tc_port_ref_held(dig_port));
+}
+
+#else
+
+static void icl_tc_port_assert_ref_held(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+}
+
+#endif
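
[Aside: icl_tc_port_assert_ref_held() compiles to an empty stub when CONFIG_DRM_I915_DEBUG_RUNTIME_PM is off, so the costly encoder walk exists only in debug builds and callers stay #ifdef-free. The idiom in miniature; DEBUG_RUNTIME_PM and assert_ref_held are hypothetical names.]

#include <assert.h>

#ifdef DEBUG_RUNTIME_PM
static void assert_ref_held(int refcount)
{
	assert(refcount > 0); /* expensive check, debug builds only */
}
#else
static inline void assert_ref_held(int refcount) { (void)refcount; }
#endif

int main(void)
{
	int refcount = 1;

	assert_ref_held(refcount); /* callers never need #ifdefs */
	return 0;
}
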
+
static void
icl_tc_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
- int pw_idx = power_well->desc->hsw.idx;
- bool is_tbt = power_well->desc->hsw.is_tc_tbt;
- enum aux_ch aux_ch;
+ enum aux_ch aux_ch = icl_tc_phy_aux_ch(dev_priv, power_well);
u32 val;
- aux_ch = is_tbt ? ICL_TBT_AUX_PW_TO_CH(pw_idx) :
- ICL_AUX_PW_TO_CH(pw_idx);
+ icl_tc_port_assert_ref_held(dev_priv, power_well);
+
val = I915_READ(DP_AUX_CH_CTL(aux_ch));
val &= ~DP_AUX_CH_CTL_TBT_IO;
- if (is_tbt)
+ if (power_well->desc->hsw.is_tc_tbt)
val |= DP_AUX_CH_CTL_TBT_IO;
I915_WRITE(DP_AUX_CH_CTL(aux_ch), val);
hsw_power_well_enable(dev_priv, power_well);
}
+static void
+icl_tc_phy_aux_power_well_disable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ icl_tc_port_assert_ref_held(dev_priv, power_well);
+
+ hsw_power_well_disable(dev_priv, power_well);
+}
+
/*
* We should only use the power well if we explicitly asked the hardware to
* enable it, so check if it's enabled and also check if we've requested it to
@@ -580,7 +724,7 @@ static u32 gen9_dc_mask(struct drm_i915_private *dev_priv)
return mask;
}
-void gen9_sanitize_dc_state(struct drm_i915_private *dev_priv)
+static void gen9_sanitize_dc_state(struct drm_i915_private *dev_priv)
{
u32 val;
@@ -640,7 +784,7 @@ static void gen9_set_dc_state(struct drm_i915_private *dev_priv, u32 state)
dev_priv->csr.dc_state = val & mask;
}
-void bxt_enable_dc9(struct drm_i915_private *dev_priv)
+static void bxt_enable_dc9(struct drm_i915_private *dev_priv)
{
assert_can_enable_dc9(dev_priv);
@@ -655,7 +799,7 @@ void bxt_enable_dc9(struct drm_i915_private *dev_priv)
gen9_set_dc_state(dev_priv, DC_STATE_EN_DC9);
}
-void bxt_disable_dc9(struct drm_i915_private *dev_priv)
+static void bxt_disable_dc9(struct drm_i915_private *dev_priv)
{
assert_can_disable_dc9(dev_priv);
@@ -709,7 +853,7 @@ static void assert_can_enable_dc5(struct drm_i915_private *dev_priv)
assert_csr_loaded(dev_priv);
}
-void gen9_enable_dc5(struct drm_i915_private *dev_priv)
+static void gen9_enable_dc5(struct drm_i915_private *dev_priv)
{
assert_can_enable_dc5(dev_priv);
@@ -733,7 +877,7 @@ static void assert_can_enable_dc6(struct drm_i915_private *dev_priv)
assert_csr_loaded(dev_priv);
}
-void skl_enable_dc6(struct drm_i915_private *dev_priv)
+static void skl_enable_dc6(struct drm_i915_private *dev_priv)
{
assert_can_enable_dc6(dev_priv);
@@ -819,8 +963,7 @@ static void gen9_assert_dbuf_enabled(struct drm_i915_private *dev_priv)
"Unexpected DBuf power power state (0x%08x)\n", tmp);
}
-static void gen9_dc_off_power_well_enable(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
+static void gen9_disable_dc_states(struct drm_i915_private *dev_priv)
{
struct intel_cdclk_state cdclk_state = {};
@@ -844,6 +987,12 @@ static void gen9_dc_off_power_well_enable(struct drm_i915_private *dev_priv,
intel_combo_phy_init(dev_priv);
}
+static void gen9_dc_off_power_well_enable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ gen9_disable_dc_states(dev_priv);
+}
+
static void gen9_dc_off_power_well_disable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
@@ -1071,7 +1220,7 @@ static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv)
spin_unlock_irq(&dev_priv->irq_lock);
/* make sure we're done processing display irqs */
- synchronize_irq(dev_priv->drm.irq);
+ intel_synchronize_irq(dev_priv);
intel_power_sequencer_reset(dev_priv);
@@ -1232,11 +1381,8 @@ static void assert_chv_phy_status(struct drm_i915_private *dev_priv)
* The PHY may be busy with some initial calibration and whatnot,
* so the power state can take a while to actually change.
*/
- if (intel_wait_for_register(&dev_priv->uncore,
- DISPLAY_PHY_STATUS,
- phy_status_mask,
- phy_status,
- 10))
+ if (intel_de_wait_for_register(dev_priv, DISPLAY_PHY_STATUS,
+ phy_status_mask, phy_status, 10))
DRM_ERROR("Unexpected PHY_STATUS 0x%08x, expected 0x%08x (PHY_CONTROL=0x%08x)\n",
I915_READ(DISPLAY_PHY_STATUS) & phy_status_mask,
phy_status, dev_priv->chv_phy_control);
@@ -1267,11 +1413,8 @@ static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
vlv_set_power_well(dev_priv, power_well, true);
/* Poll for phypwrgood signal */
- if (intel_wait_for_register(&dev_priv->uncore,
- DISPLAY_PHY_STATUS,
- PHY_POWERGOOD(phy),
- PHY_POWERGOOD(phy),
- 1))
+ if (intel_de_wait_for_set(dev_priv, DISPLAY_PHY_STATUS,
+ PHY_POWERGOOD(phy), 1))
DRM_ERROR("Display PHY %d is not power up\n", phy);
vlv_dpio_get(dev_priv);
@@ -1575,12 +1718,15 @@ __async_put_domains_state_ok(struct i915_power_domains *power_domains)
static void print_power_domains(struct i915_power_domains *power_domains,
const char *prefix, u64 mask)
{
+ struct drm_i915_private *i915 =
+ container_of(power_domains, struct drm_i915_private,
+ power_domains);
enum intel_display_power_domain domain;
DRM_DEBUG_DRIVER("%s (%lu):\n", prefix, hweight64(mask));
for_each_power_domain(domain, mask)
DRM_DEBUG_DRIVER("%s use_count %d\n",
- intel_display_power_domain_str(domain),
+ intel_display_power_domain_str(i915, domain),
power_domains->domain_use_count[domain]);
}
@@ -1750,7 +1896,7 @@ __intel_display_power_put_domain(struct drm_i915_private *dev_priv,
{
struct i915_power_domains *power_domains;
struct i915_power_well *power_well;
- const char *name = intel_display_power_domain_str(domain);
+ const char *name = intel_display_power_domain_str(dev_priv, domain);
power_domains = &dev_priv->power_domains;
@@ -2332,15 +2478,10 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO) | \
BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO) | \
BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO) | \
BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO) | \
BIT_ULL(POWER_DOMAIN_PORT_DDI_F_LANES) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO) | \
BIT_ULL(POWER_DOMAIN_AUX_B) | \
BIT_ULL(POWER_DOMAIN_AUX_C) | \
BIT_ULL(POWER_DOMAIN_AUX_D) | \
@@ -2359,7 +2500,7 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
*/
#define ICL_PW_2_POWER_DOMAINS ( \
ICL_PW_3_POWER_DOMAINS | \
- BIT_ULL(POWER_DOMAIN_TRANSCODER_EDP_VDSC) | \
+ BIT_ULL(POWER_DOMAIN_TRANSCODER_VDSC_PW2) | \
BIT_ULL(POWER_DOMAIN_INIT))
/*
* - KVMR (HW control)
@@ -2368,6 +2509,7 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
ICL_PW_2_POWER_DOMAINS | \
BIT_ULL(POWER_DOMAIN_MODESET) | \
BIT_ULL(POWER_DOMAIN_AUX_A) | \
+ BIT_ULL(POWER_DOMAIN_DPLL_DC_OFF) | \
BIT_ULL(POWER_DOMAIN_INIT))
#define ICL_DDI_IO_A_POWER_DOMAINS ( \
@@ -2405,6 +2547,87 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
#define ICL_AUX_TBT4_IO_POWER_DOMAINS ( \
BIT_ULL(POWER_DOMAIN_AUX_TBT4))
+#define TGL_PW_5_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_PIPE_D) | \
+ BIT_ULL(POWER_DOMAIN_TRANSCODER_D) | \
+ BIT_ULL(POWER_DOMAIN_PIPE_D_PANEL_FITTER) | \
+ BIT_ULL(POWER_DOMAIN_INIT))
+
+#define TGL_PW_4_POWER_DOMAINS ( \
+ TGL_PW_5_POWER_DOMAINS | \
+ BIT_ULL(POWER_DOMAIN_PIPE_C) | \
+ BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
+ BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
+ BIT_ULL(POWER_DOMAIN_INIT))
+
+#define TGL_PW_3_POWER_DOMAINS ( \
+ TGL_PW_4_POWER_DOMAINS | \
+ BIT_ULL(POWER_DOMAIN_PIPE_B) | \
+ BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
+ BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_TC1_LANES) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_TC2_LANES) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_TC3_LANES) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_TC4_LANES) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_TC5_LANES) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_TC6_LANES) | \
+ BIT_ULL(POWER_DOMAIN_AUX_TC1) | \
+ BIT_ULL(POWER_DOMAIN_AUX_TC2) | \
+ BIT_ULL(POWER_DOMAIN_AUX_TC3) | \
+ BIT_ULL(POWER_DOMAIN_AUX_TC4) | \
+ BIT_ULL(POWER_DOMAIN_AUX_TC5) | \
+ BIT_ULL(POWER_DOMAIN_AUX_TC6) | \
+ BIT_ULL(POWER_DOMAIN_AUX_TBT1) | \
+ BIT_ULL(POWER_DOMAIN_AUX_TBT2) | \
+ BIT_ULL(POWER_DOMAIN_AUX_TBT3) | \
+ BIT_ULL(POWER_DOMAIN_AUX_TBT4) | \
+ BIT_ULL(POWER_DOMAIN_AUX_TBT5) | \
+ BIT_ULL(POWER_DOMAIN_AUX_TBT6) | \
+ BIT_ULL(POWER_DOMAIN_VGA) | \
+ BIT_ULL(POWER_DOMAIN_AUDIO) | \
+ BIT_ULL(POWER_DOMAIN_INIT))
+
+#define TGL_PW_2_POWER_DOMAINS ( \
+ TGL_PW_3_POWER_DOMAINS | \
+ BIT_ULL(POWER_DOMAIN_TRANSCODER_VDSC_PW2) | \
+ BIT_ULL(POWER_DOMAIN_INIT))
+
+#define TGL_DISPLAY_DC_OFF_POWER_DOMAINS ( \
+ TGL_PW_2_POWER_DOMAINS | \
+ BIT_ULL(POWER_DOMAIN_MODESET) | \
+ BIT_ULL(POWER_DOMAIN_AUX_A) | \
+ BIT_ULL(POWER_DOMAIN_INIT))
+
+#define TGL_DDI_IO_TC1_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_TC1_IO))
+#define TGL_DDI_IO_TC2_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_TC2_IO))
+#define TGL_DDI_IO_TC3_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_TC3_IO))
+#define TGL_DDI_IO_TC4_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_TC4_IO))
+#define TGL_DDI_IO_TC5_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_TC5_IO))
+#define TGL_DDI_IO_TC6_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_TC6_IO))
+
+#define TGL_AUX_TC1_IO_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_AUX_TC1))
+#define TGL_AUX_TC2_IO_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_AUX_TC2))
+#define TGL_AUX_TC3_IO_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_AUX_TC3))
+#define TGL_AUX_TC4_IO_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_AUX_TC4))
+#define TGL_AUX_TC5_IO_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_AUX_TC5))
+#define TGL_AUX_TC6_IO_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_AUX_TC6))
+#define TGL_AUX_TBT5_IO_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_AUX_TBT5))
+#define TGL_AUX_TBT6_IO_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_AUX_TBT6))
+
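
[Aside: the TGL_PW_* masks above nest — each outer well's domain set is defined as the inner well's set plus its own domains, so PW2 ⊇ PW3 ⊇ PW4 ⊇ PW5 by construction. A self-contained model of that composition; the domain names are invented.]

#include <stdio.h>

#define BIT_ULL(n) (1ULL << (n))

enum { DOM_PIPE_B, DOM_PIPE_C, DOM_PIPE_D, DOM_INIT };

/* Deeper wells fold into their parents, mirroring the power hierarchy. */
#define PW_5_DOMAINS (BIT_ULL(DOM_PIPE_D) | BIT_ULL(DOM_INIT))
#define PW_4_DOMAINS (PW_5_DOMAINS | BIT_ULL(DOM_PIPE_C))
#define PW_3_DOMAINS (PW_4_DOMAINS | BIT_ULL(DOM_PIPE_B))

int main(void)
{
	/* A domain of an inner well is automatically covered by parents. */
	printf("PW3 covers PIPE_D: %s\n",
	       (PW_3_DOMAINS & BIT_ULL(DOM_PIPE_D)) ? "yes" : "no");
	return 0;
}
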
static const struct i915_power_well_ops i9xx_always_on_power_well_ops = {
.sync_hw = i9xx_power_well_sync_hw_noop,
.enable = i9xx_always_on_power_well_noop,
@@ -3113,7 +3336,7 @@ static const struct i915_power_well_ops icl_combo_phy_aux_power_well_ops = {
static const struct i915_power_well_ops icl_tc_phy_aux_power_well_ops = {
.sync_hw = hsw_power_well_sync_hw,
.enable = icl_tc_phy_aux_power_well_enable,
- .disable = hsw_power_well_disable,
+ .disable = icl_tc_phy_aux_power_well_disable,
.is_enabled = hsw_power_well_enabled,
};
@@ -3362,6 +3585,335 @@ static const struct i915_power_well_desc icl_power_wells[] = {
},
};
+static const struct i915_power_well_desc tgl_power_wells[] = {
+ {
+ .name = "always-on",
+ .always_on = true,
+ .domains = POWER_DOMAIN_MASK,
+ .ops = &i9xx_always_on_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ },
+ {
+ .name = "power well 1",
+ /* Handled by the DMC firmware */
+ .always_on = true,
+ .domains = 0,
+ .ops = &hsw_power_well_ops,
+ .id = SKL_DISP_PW_1,
+ {
+ .hsw.regs = &hsw_power_well_regs,
+ .hsw.idx = ICL_PW_CTL_IDX_PW_1,
+ .hsw.has_fuses = true,
+ },
+ },
+ {
+ .name = "DC off",
+ .domains = TGL_DISPLAY_DC_OFF_POWER_DOMAINS,
+ .ops = &gen9_dc_off_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ },
+ {
+ .name = "power well 2",
+ .domains = TGL_PW_2_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = SKL_DISP_PW_2,
+ {
+ .hsw.regs = &hsw_power_well_regs,
+ .hsw.idx = ICL_PW_CTL_IDX_PW_2,
+ .hsw.has_fuses = true,
+ },
+ },
+ {
+ .name = "power well 3",
+ .domains = TGL_PW_3_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &hsw_power_well_regs,
+ .hsw.idx = ICL_PW_CTL_IDX_PW_3,
+ .hsw.irq_pipe_mask = BIT(PIPE_B),
+ .hsw.has_vga = true,
+ .hsw.has_fuses = true,
+ },
+ },
+ {
+ .name = "DDI A IO",
+ .domains = ICL_DDI_IO_A_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &icl_ddi_power_well_regs,
+ .hsw.idx = ICL_PW_CTL_IDX_DDI_A,
+ }
+ },
+ {
+ .name = "DDI B IO",
+ .domains = ICL_DDI_IO_B_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &icl_ddi_power_well_regs,
+ .hsw.idx = ICL_PW_CTL_IDX_DDI_B,
+ }
+ },
+ {
+ .name = "DDI C IO",
+ .domains = ICL_DDI_IO_C_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &icl_ddi_power_well_regs,
+ .hsw.idx = ICL_PW_CTL_IDX_DDI_C,
+ }
+ },
+ {
+ .name = "DDI TC1 IO",
+ .domains = TGL_DDI_IO_TC1_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &icl_ddi_power_well_regs,
+ .hsw.idx = TGL_PW_CTL_IDX_DDI_TC1,
+ },
+ },
+ {
+ .name = "DDI TC2 IO",
+ .domains = TGL_DDI_IO_TC2_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &icl_ddi_power_well_regs,
+ .hsw.idx = TGL_PW_CTL_IDX_DDI_TC2,
+ },
+ },
+ {
+ .name = "DDI TC3 IO",
+ .domains = TGL_DDI_IO_TC3_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &icl_ddi_power_well_regs,
+ .hsw.idx = TGL_PW_CTL_IDX_DDI_TC3,
+ },
+ },
+ {
+ .name = "DDI TC4 IO",
+ .domains = TGL_DDI_IO_TC4_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &icl_ddi_power_well_regs,
+ .hsw.idx = TGL_PW_CTL_IDX_DDI_TC4,
+ },
+ },
+ {
+ .name = "DDI TC5 IO",
+ .domains = TGL_DDI_IO_TC5_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &icl_ddi_power_well_regs,
+ .hsw.idx = TGL_PW_CTL_IDX_DDI_TC5,
+ },
+ },
+ {
+ .name = "DDI TC6 IO",
+ .domains = TGL_DDI_IO_TC6_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &icl_ddi_power_well_regs,
+ .hsw.idx = TGL_PW_CTL_IDX_DDI_TC6,
+ },
+ },
+ {
+ .name = "AUX A",
+ .domains = ICL_AUX_A_IO_POWER_DOMAINS,
+ .ops = &icl_combo_phy_aux_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &icl_aux_power_well_regs,
+ .hsw.idx = ICL_PW_CTL_IDX_AUX_A,
+ },
+ },
+ {
+ .name = "AUX B",
+ .domains = ICL_AUX_B_IO_POWER_DOMAINS,
+ .ops = &icl_combo_phy_aux_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &icl_aux_power_well_regs,
+ .hsw.idx = ICL_PW_CTL_IDX_AUX_B,
+ },
+ },
+ {
+ .name = "AUX C",
+ .domains = ICL_AUX_C_IO_POWER_DOMAINS,
+ .ops = &icl_combo_phy_aux_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &icl_aux_power_well_regs,
+ .hsw.idx = ICL_PW_CTL_IDX_AUX_C,
+ },
+ },
+ {
+ .name = "AUX TC1",
+ .domains = TGL_AUX_TC1_IO_POWER_DOMAINS,
+ .ops = &icl_tc_phy_aux_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &icl_aux_power_well_regs,
+ .hsw.idx = TGL_PW_CTL_IDX_AUX_TC1,
+ .hsw.is_tc_tbt = false,
+ },
+ },
+ {
+ .name = "AUX TC2",
+ .domains = TGL_AUX_TC2_IO_POWER_DOMAINS,
+ .ops = &icl_tc_phy_aux_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &icl_aux_power_well_regs,
+ .hsw.idx = TGL_PW_CTL_IDX_AUX_TC2,
+ .hsw.is_tc_tbt = false,
+ },
+ },
+ {
+ .name = "AUX TC3",
+ .domains = TGL_AUX_TC3_IO_POWER_DOMAINS,
+ .ops = &icl_tc_phy_aux_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &icl_aux_power_well_regs,
+ .hsw.idx = TGL_PW_CTL_IDX_AUX_TC3,
+ .hsw.is_tc_tbt = false,
+ },
+ },
+ {
+ .name = "AUX TC4",
+ .domains = TGL_AUX_TC4_IO_POWER_DOMAINS,
+ .ops = &icl_tc_phy_aux_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &icl_aux_power_well_regs,
+ .hsw.idx = TGL_PW_CTL_IDX_AUX_TC4,
+ .hsw.is_tc_tbt = false,
+ },
+ },
+ {
+ .name = "AUX TC5",
+ .domains = TGL_AUX_TC5_IO_POWER_DOMAINS,
+ .ops = &icl_tc_phy_aux_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &icl_aux_power_well_regs,
+ .hsw.idx = TGL_PW_CTL_IDX_AUX_TC5,
+ .hsw.is_tc_tbt = false,
+ },
+ },
+ {
+ .name = "AUX TC6",
+ .domains = TGL_AUX_TC6_IO_POWER_DOMAINS,
+ .ops = &icl_tc_phy_aux_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &icl_aux_power_well_regs,
+ .hsw.idx = TGL_PW_CTL_IDX_AUX_TC6,
+ .hsw.is_tc_tbt = false,
+ },
+ },
+ {
+ .name = "AUX TBT1",
+ .domains = ICL_AUX_TBT1_IO_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &icl_aux_power_well_regs,
+ .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT1,
+ .hsw.is_tc_tbt = true,
+ },
+ },
+ {
+ .name = "AUX TBT2",
+ .domains = ICL_AUX_TBT2_IO_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &icl_aux_power_well_regs,
+ .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT2,
+ .hsw.is_tc_tbt = true,
+ },
+ },
+ {
+ .name = "AUX TBT3",
+ .domains = ICL_AUX_TBT3_IO_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &icl_aux_power_well_regs,
+ .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT3,
+ .hsw.is_tc_tbt = true,
+ },
+ },
+ {
+ .name = "AUX TBT4",
+ .domains = ICL_AUX_TBT4_IO_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &icl_aux_power_well_regs,
+ .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT4,
+ .hsw.is_tc_tbt = true,
+ },
+ },
+ {
+ .name = "AUX TBT5",
+ .domains = TGL_AUX_TBT5_IO_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &icl_aux_power_well_regs,
+ .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT5,
+ .hsw.is_tc_tbt = true,
+ },
+ },
+ {
+ .name = "AUX TBT6",
+ .domains = TGL_AUX_TBT6_IO_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &icl_aux_power_well_regs,
+ .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT6,
+ .hsw.is_tc_tbt = true,
+ },
+ },
+ {
+ .name = "power well 4",
+ .domains = TGL_PW_4_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &hsw_power_well_regs,
+ .hsw.idx = ICL_PW_CTL_IDX_PW_4,
+ .hsw.has_fuses = true,
+ .hsw.irq_pipe_mask = BIT(PIPE_C),
+ }
+ },
+ {
+ .name = "power well 5",
+ .domains = TGL_PW_5_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &hsw_power_well_regs,
+ .hsw.idx = TGL_PW_CTL_IDX_PW_5,
+ .hsw.has_fuses = true,
+ .hsw.irq_pipe_mask = BIT(PIPE_D),
+ },
+ },
+};
+
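
[Aside: tgl_power_wells[] is a data-driven descriptor table — each entry pairs a domain mask with an ops vtable, and platform setup just installs the matching array. A minimal sketch of the shape; all names here are hypothetical.]

#include <stdio.h>

struct well_ops {
	void (*enable)(const char *name);
};

static void hw_enable(const char *name) { printf("enable %s\n", name); }

struct well_desc {
	const char *name;
	unsigned long long domains;
	const struct well_ops *ops;
};

static const struct well_ops default_ops = { .enable = hw_enable };

static const struct well_desc wells[] = {
	{ .name = "PW1", .domains = 0x3, .ops = &default_ops },
	{ .name = "PW2", .domains = 0xc, .ops = &default_ops },
};

int main(void)
{
	for (unsigned int i = 0; i < sizeof(wells) / sizeof(wells[0]); i++)
		wells[i].ops->enable(wells[i].name);
	return 0;
}
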
static int
sanitize_disable_power_well_option(const struct drm_i915_private *dev_priv,
int disable_power_well)
@@ -3489,7 +4041,9 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv)
* The enabling order will be from lower to higher indexed wells,
* the disabling order is reversed.
*/
- if (IS_GEN(dev_priv, 11)) {
+ if (IS_GEN(dev_priv, 12)) {
+ err = set_power_wells(power_domains, tgl_power_wells);
+ } else if (IS_GEN(dev_priv, 11)) {
err = set_power_wells(power_domains, icl_power_wells);
} else if (IS_CANNONLAKE(dev_priv)) {
err = set_power_wells(power_domains, cnl_power_wells);
@@ -3773,8 +4327,7 @@ static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
I915_WRITE(LCPLL_CTL, val);
POSTING_READ(LCPLL_CTL);
- if (intel_wait_for_register(&dev_priv->uncore, LCPLL_CTL,
- LCPLL_PLL_LOCK, 0, 1))
+ if (intel_de_wait_for_clear(dev_priv, LCPLL_CTL, LCPLL_PLL_LOCK, 1))
DRM_ERROR("LCPLL still locked\n");
val = hsw_read_dcomp(dev_priv);
@@ -3829,8 +4382,7 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
val &= ~LCPLL_PLL_DISABLE;
I915_WRITE(LCPLL_CTL, val);
- if (intel_wait_for_register(&dev_priv->uncore, LCPLL_CTL,
- LCPLL_PLL_LOCK, LCPLL_PLL_LOCK, 5))
+ if (intel_de_wait_for_set(dev_priv, LCPLL_CTL, LCPLL_PLL_LOCK, 5))
DRM_ERROR("LCPLL not locked yet\n");
if (val & LCPLL_CD_SOURCE_FCLK) {
@@ -3872,7 +4424,7 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
* For more, read "Display Sequences for Package C8" on the hardware
* documentation.
*/
-void hsw_enable_pc8(struct drm_i915_private *dev_priv)
+static void hsw_enable_pc8(struct drm_i915_private *dev_priv)
{
u32 val;
@@ -3888,7 +4440,7 @@ void hsw_enable_pc8(struct drm_i915_private *dev_priv)
hsw_disable_lcpll(dev_priv, true, true);
}
-void hsw_disable_pc8(struct drm_i915_private *dev_priv)
+static void hsw_disable_pc8(struct drm_i915_private *dev_priv)
{
u32 val;
@@ -3963,7 +4515,7 @@ static void skl_display_core_uninit(struct drm_i915_private *dev_priv)
struct i915_power_domains *power_domains = &dev_priv->power_domains;
struct i915_power_well *well;
- gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
+ gen9_disable_dc_states(dev_priv);
gen9_dbuf_disable(dev_priv);
@@ -3988,8 +4540,7 @@ static void skl_display_core_uninit(struct drm_i915_private *dev_priv)
usleep_range(10, 30); /* 10 us delay per Bspec */
}
-void bxt_display_core_init(struct drm_i915_private *dev_priv,
- bool resume)
+static void bxt_display_core_init(struct drm_i915_private *dev_priv, bool resume)
{
struct i915_power_domains *power_domains = &dev_priv->power_domains;
struct i915_power_well *well;
@@ -4020,12 +4571,12 @@ void bxt_display_core_init(struct drm_i915_private *dev_priv,
intel_csr_load_program(dev_priv);
}
-void bxt_display_core_uninit(struct drm_i915_private *dev_priv)
+static void bxt_display_core_uninit(struct drm_i915_private *dev_priv)
{
struct i915_power_domains *power_domains = &dev_priv->power_domains;
struct i915_power_well *well;
- gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
+ gen9_disable_dc_states(dev_priv);
gen9_dbuf_disable(dev_priv);
@@ -4085,7 +4636,7 @@ static void cnl_display_core_uninit(struct drm_i915_private *dev_priv)
struct i915_power_domains *power_domains = &dev_priv->power_domains;
struct i915_power_well *well;
- gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
+ gen9_disable_dc_states(dev_priv);
/* 1. Disable all display engine functions -> already done */
@@ -4111,8 +4662,8 @@ static void cnl_display_core_uninit(struct drm_i915_private *dev_priv)
intel_combo_phy_uninit(dev_priv);
}
-void icl_display_core_init(struct drm_i915_private *dev_priv,
- bool resume)
+static void icl_display_core_init(struct drm_i915_private *dev_priv,
+ bool resume)
{
struct i915_power_domains *power_domains = &dev_priv->power_domains;
struct i915_power_well *well;
@@ -4147,12 +4698,12 @@ void icl_display_core_init(struct drm_i915_private *dev_priv,
intel_csr_load_program(dev_priv);
}
-void icl_display_core_uninit(struct drm_i915_private *dev_priv)
+static void icl_display_core_uninit(struct drm_i915_private *dev_priv)
{
struct i915_power_domains *power_domains = &dev_priv->power_domains;
struct i915_power_well *well;
- gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
+ gen9_disable_dc_states(dev_priv);
/* 1. Disable all display engine functions -> already done */
@@ -4337,7 +4888,7 @@ static void intel_power_domains_verify_state(struct drm_i915_private *dev_priv);
*
* It will return with power domains disabled (to be enabled later by
* intel_power_domains_enable()) and must be paired with
- * intel_power_domains_fini_hw().
+ * intel_power_domains_driver_remove().
*/
void intel_power_domains_init_hw(struct drm_i915_private *i915, bool resume)
{
@@ -4389,7 +4940,7 @@ void intel_power_domains_init_hw(struct drm_i915_private *i915, bool resume)
}
/**
- * intel_power_domains_fini_hw - deinitialize hw power domain state
+ * intel_power_domains_driver_remove - deinitialize hw power domain state
* @i915: i915 device instance
*
* De-initializes the display power domain HW state. It also ensures that the
@@ -4399,7 +4950,7 @@ void intel_power_domains_init_hw(struct drm_i915_private *i915, bool resume)
* intel_power_domains_disable()) and must be paired with
* intel_power_domains_init_hw().
*/
-void intel_power_domains_fini_hw(struct drm_i915_private *i915)
+void intel_power_domains_driver_remove(struct drm_i915_private *i915)
{
intel_wakeref_t wakeref __maybe_unused =
fetch_and_zero(&i915->power_domains.wakeref);
@@ -4553,7 +5104,8 @@ static void intel_power_domains_dump_info(struct drm_i915_private *i915)
for_each_power_domain(domain, power_well->desc->domains)
DRM_DEBUG_DRIVER(" %-23s %d\n",
- intel_display_power_domain_str(domain),
+ intel_display_power_domain_str(i915,
+ domain),
power_domains->domain_use_count[domain]);
}
}
@@ -4623,3 +5175,58 @@ static void intel_power_domains_verify_state(struct drm_i915_private *i915)
}
#endif
+
+void intel_display_power_suspend_late(struct drm_i915_private *i915)
+{
+ if (INTEL_GEN(i915) >= 11 || IS_GEN9_LP(i915))
+ bxt_enable_dc9(i915);
+ else if (IS_HASWELL(i915) || IS_BROADWELL(i915))
+ hsw_enable_pc8(i915);
+}
+
+void intel_display_power_resume_early(struct drm_i915_private *i915)
+{
+ if (INTEL_GEN(i915) >= 11 || IS_GEN9_LP(i915)) {
+ gen9_sanitize_dc_state(i915);
+ bxt_disable_dc9(i915);
+ } else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
+ hsw_disable_pc8(i915);
+ }
+}
+
+void intel_display_power_suspend(struct drm_i915_private *i915)
+{
+ if (INTEL_GEN(i915) >= 11) {
+ icl_display_core_uninit(i915);
+ bxt_enable_dc9(i915);
+ } else if (IS_GEN9_LP(i915)) {
+ bxt_display_core_uninit(i915);
+ bxt_enable_dc9(i915);
+ } else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
+ hsw_enable_pc8(i915);
+ }
+}
+
+void intel_display_power_resume(struct drm_i915_private *i915)
+{
+ if (INTEL_GEN(i915) >= 11) {
+ bxt_disable_dc9(i915);
+ icl_display_core_init(i915, true);
+ if (i915->csr.dmc_payload) {
+ if (i915->csr.allowed_dc_mask &
+ DC_STATE_EN_UPTO_DC6)
+ skl_enable_dc6(i915);
+ else if (i915->csr.allowed_dc_mask &
+ DC_STATE_EN_UPTO_DC5)
+ gen9_enable_dc5(i915);
+ }
+ } else if (IS_GEN9_LP(i915)) {
+ bxt_disable_dc9(i915);
+ bxt_display_core_init(i915, true);
+ if (i915->csr.dmc_payload &&
+ (i915->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC5))
+ gen9_enable_dc5(i915);
+ } else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
+ hsw_disable_pc8(i915);
+ }
+}
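
[Aside: intel_display_power_resume() ends by re-entering the deepest DC state the DMC firmware allows, preferring DC6 over DC5. The selection is just ordered bitmask tests; a tiny model, with placeholder mask values rather than the real register bits.]

#include <stdio.h>

#define DC_STATE_EN_UPTO_DC5 (1u << 0) /* placeholder value */
#define DC_STATE_EN_UPTO_DC6 (1u << 1) /* placeholder value */

/* Prefer the deepest allowed state; the order of tests encodes priority. */
static const char *deepest_dc_state(unsigned int allowed_dc_mask)
{
	if (allowed_dc_mask & DC_STATE_EN_UPTO_DC6)
		return "DC6";
	if (allowed_dc_mask & DC_STATE_EN_UPTO_DC5)
		return "DC5";
	return "none";
}

int main(void)
{
	printf("%s\n", deepest_dc_state(DC_STATE_EN_UPTO_DC6 |
					DC_STATE_EN_UPTO_DC5));
	return 0;
}
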
diff --git a/drivers/gpu/drm/i915/display/intel_display_power.h b/drivers/gpu/drm/i915/display/intel_display_power.h
index ff57b0a7fe59..a50605b8b1ad 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power.h
+++ b/drivers/gpu/drm/i915/display/intel_display_power.h
@@ -18,28 +18,47 @@ enum intel_display_power_domain {
POWER_DOMAIN_PIPE_A,
POWER_DOMAIN_PIPE_B,
POWER_DOMAIN_PIPE_C,
+ POWER_DOMAIN_PIPE_D,
POWER_DOMAIN_PIPE_A_PANEL_FITTER,
POWER_DOMAIN_PIPE_B_PANEL_FITTER,
POWER_DOMAIN_PIPE_C_PANEL_FITTER,
+ POWER_DOMAIN_PIPE_D_PANEL_FITTER,
POWER_DOMAIN_TRANSCODER_A,
POWER_DOMAIN_TRANSCODER_B,
POWER_DOMAIN_TRANSCODER_C,
+ POWER_DOMAIN_TRANSCODER_D,
POWER_DOMAIN_TRANSCODER_EDP,
- POWER_DOMAIN_TRANSCODER_EDP_VDSC,
+ /* VDSC/joining for TRANSCODER_EDP (ICL) or TRANSCODER_A (TGL) */
+ POWER_DOMAIN_TRANSCODER_VDSC_PW2,
POWER_DOMAIN_TRANSCODER_DSI_A,
POWER_DOMAIN_TRANSCODER_DSI_C,
POWER_DOMAIN_PORT_DDI_A_LANES,
POWER_DOMAIN_PORT_DDI_B_LANES,
POWER_DOMAIN_PORT_DDI_C_LANES,
POWER_DOMAIN_PORT_DDI_D_LANES,
+ POWER_DOMAIN_PORT_DDI_TC1_LANES = POWER_DOMAIN_PORT_DDI_D_LANES,
POWER_DOMAIN_PORT_DDI_E_LANES,
+ POWER_DOMAIN_PORT_DDI_TC2_LANES = POWER_DOMAIN_PORT_DDI_E_LANES,
POWER_DOMAIN_PORT_DDI_F_LANES,
+ POWER_DOMAIN_PORT_DDI_TC3_LANES = POWER_DOMAIN_PORT_DDI_F_LANES,
+ POWER_DOMAIN_PORT_DDI_TC4_LANES,
+ POWER_DOMAIN_PORT_DDI_TC5_LANES,
+ POWER_DOMAIN_PORT_DDI_TC6_LANES,
POWER_DOMAIN_PORT_DDI_A_IO,
POWER_DOMAIN_PORT_DDI_B_IO,
POWER_DOMAIN_PORT_DDI_C_IO,
POWER_DOMAIN_PORT_DDI_D_IO,
+ POWER_DOMAIN_PORT_DDI_TC1_IO = POWER_DOMAIN_PORT_DDI_D_IO,
POWER_DOMAIN_PORT_DDI_E_IO,
+ POWER_DOMAIN_PORT_DDI_TC2_IO = POWER_DOMAIN_PORT_DDI_E_IO,
POWER_DOMAIN_PORT_DDI_F_IO,
+ POWER_DOMAIN_PORT_DDI_TC3_IO = POWER_DOMAIN_PORT_DDI_F_IO,
+ POWER_DOMAIN_PORT_DDI_G_IO,
+ POWER_DOMAIN_PORT_DDI_TC4_IO = POWER_DOMAIN_PORT_DDI_G_IO,
+ POWER_DOMAIN_PORT_DDI_H_IO,
+ POWER_DOMAIN_PORT_DDI_TC5_IO = POWER_DOMAIN_PORT_DDI_H_IO,
+ POWER_DOMAIN_PORT_DDI_I_IO,
+ POWER_DOMAIN_PORT_DDI_TC6_IO = POWER_DOMAIN_PORT_DDI_I_IO,
POWER_DOMAIN_PORT_DSI,
POWER_DOMAIN_PORT_CRT,
POWER_DOMAIN_PORT_OTHER,
@@ -49,21 +68,51 @@ enum intel_display_power_domain {
POWER_DOMAIN_AUX_B,
POWER_DOMAIN_AUX_C,
POWER_DOMAIN_AUX_D,
+ POWER_DOMAIN_AUX_TC1 = POWER_DOMAIN_AUX_D,
POWER_DOMAIN_AUX_E,
+ POWER_DOMAIN_AUX_TC2 = POWER_DOMAIN_AUX_E,
POWER_DOMAIN_AUX_F,
+ POWER_DOMAIN_AUX_TC3 = POWER_DOMAIN_AUX_F,
+ POWER_DOMAIN_AUX_TC4,
+ POWER_DOMAIN_AUX_TC5,
+ POWER_DOMAIN_AUX_TC6,
POWER_DOMAIN_AUX_IO_A,
POWER_DOMAIN_AUX_TBT1,
POWER_DOMAIN_AUX_TBT2,
POWER_DOMAIN_AUX_TBT3,
POWER_DOMAIN_AUX_TBT4,
+ POWER_DOMAIN_AUX_TBT5,
+ POWER_DOMAIN_AUX_TBT6,
POWER_DOMAIN_GMBUS,
POWER_DOMAIN_MODESET,
POWER_DOMAIN_GT_IRQ,
+ POWER_DOMAIN_DPLL_DC_OFF,
POWER_DOMAIN_INIT,
POWER_DOMAIN_NUM,
};
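
[Aside: on gen12 the legacy DDI D/E/F slots are reused for TypeC ports, so the enum above gives two names to one value and intel_display_power_domain_str() picks the label at run time, with BUILD_BUG_ON() pinning the identities. The aliasing trick in isolation; names are shortened and _Static_assert stands in for BUILD_BUG_ON().]

#include <stdio.h>

enum power_domain {
	DOM_PORT_DDI_D_LANES,
	DOM_PORT_DDI_TC1_LANES = DOM_PORT_DDI_D_LANES, /* same slot */
	DOM_PORT_DDI_TC4_LANES, /* new slot, no legacy alias */
};

_Static_assert(DOM_PORT_DDI_D_LANES == DOM_PORT_DDI_TC1_LANES,
	       "alias drifted out of sync");

static const char *domain_str(int has_tc_names, enum power_domain d)
{
	switch (d) {
	case DOM_PORT_DDI_D_LANES: /* the TC1 alias lands here too */
		return has_tc_names ? "PORT_DDI_TC1_LANES"
				    : "PORT_DDI_D_LANES";
	case DOM_PORT_DDI_TC4_LANES:
		return "PORT_DDI_TC4_LANES";
	}
	return "?";
}

int main(void)
{
	printf("%s\n", domain_str(1, DOM_PORT_DDI_TC1_LANES));
	return 0;
}
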
+/*
+ * i915_power_well_id:
+ *
+ * IDs used to look up power wells. Power wells accessed directly bypassing
+ * the power domains framework must be assigned a unique ID. All other power
+ * wells must be assigned DISP_PW_ID_NONE.
+ */
+enum i915_power_well_id {
+ DISP_PW_ID_NONE,
+
+ VLV_DISP_PW_DISP2D,
+ BXT_DISP_PW_DPIO_CMN_A,
+ VLV_DISP_PW_DPIO_CMN_BC,
+ GLK_DISP_PW_DPIO_CMN_C,
+ CHV_DISP_PW_DPIO_CMN_D,
+ HSW_DISP_PW_GLOBAL,
+ SKL_DISP_PW_MISC_IO,
+ SKL_DISP_PW_1,
+ SKL_DISP_PW_2,
+};
+
#define POWER_DOMAIN_PIPE(pipe) ((pipe) + POWER_DOMAIN_PIPE_A)
#define POWER_DOMAIN_PIPE_PANEL_FITTER(pipe) \
((pipe) + POWER_DOMAIN_PIPE_A_PANEL_FITTER)
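
[Aside: POWER_DOMAIN_PIPE() and friends map an enum pipe to its domain by plain offset arithmetic, which silently depends on both enums staying in the same order. A sketch that makes the dependency explicit; the _Static_assert is an illustration, not in the patch.]

#include <stdio.h>

enum pipe { PIPE_A, PIPE_B, PIPE_C, PIPE_D };
enum domain { DOM_PIPE_A, DOM_PIPE_B, DOM_PIPE_C, DOM_PIPE_D };

/* Pure offset arithmetic: valid only while the enums stay in lock-step. */
#define POWER_DOMAIN_PIPE(pipe) ((pipe) + DOM_PIPE_A)

_Static_assert(POWER_DOMAIN_PIPE(PIPE_D) == DOM_PIPE_D,
	       "pipe/domain enums out of sync");

int main(void)
{
	printf("pipe D -> domain %d\n", POWER_DOMAIN_PIPE(PIPE_D));
	return 0;
}
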
@@ -204,30 +253,24 @@ struct i915_power_domains {
for_each_power_well_reverse(__dev_priv, __power_well) \
for_each_if((__power_well)->desc->domains & (__domain_mask))
-void skl_enable_dc6(struct drm_i915_private *dev_priv);
-void gen9_sanitize_dc_state(struct drm_i915_private *dev_priv);
-void bxt_enable_dc9(struct drm_i915_private *dev_priv);
-void bxt_disable_dc9(struct drm_i915_private *dev_priv);
-void gen9_enable_dc5(struct drm_i915_private *dev_priv);
-
int intel_power_domains_init(struct drm_i915_private *dev_priv);
void intel_power_domains_cleanup(struct drm_i915_private *dev_priv);
void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume);
-void intel_power_domains_fini_hw(struct drm_i915_private *dev_priv);
-void icl_display_core_init(struct drm_i915_private *dev_priv, bool resume);
-void icl_display_core_uninit(struct drm_i915_private *dev_priv);
+void intel_power_domains_driver_remove(struct drm_i915_private *dev_priv);
void intel_power_domains_enable(struct drm_i915_private *dev_priv);
void intel_power_domains_disable(struct drm_i915_private *dev_priv);
void intel_power_domains_suspend(struct drm_i915_private *dev_priv,
enum i915_drm_suspend_mode);
void intel_power_domains_resume(struct drm_i915_private *dev_priv);
-void hsw_enable_pc8(struct drm_i915_private *dev_priv);
-void hsw_disable_pc8(struct drm_i915_private *dev_priv);
-void bxt_display_core_init(struct drm_i915_private *dev_priv, bool resume);
-void bxt_display_core_uninit(struct drm_i915_private *dev_priv);
+
+void intel_display_power_suspend_late(struct drm_i915_private *i915);
+void intel_display_power_resume_early(struct drm_i915_private *i915);
+void intel_display_power_suspend(struct drm_i915_private *i915);
+void intel_display_power_resume(struct drm_i915_private *i915);
const char *
-intel_display_power_domain_str(enum intel_display_power_domain domain);
+intel_display_power_domain_str(struct drm_i915_private *i915,
+ enum intel_display_power_domain domain);
bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
enum intel_display_power_domain domain);
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/display/intel_display_types.h
index f11979879e7b..449abaea619f 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/display/intel_display_types.h
@@ -22,8 +22,9 @@
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
-#ifndef __INTEL_DRV_H__
-#define __INTEL_DRV_H__
+
+#ifndef __INTEL_DISPLAY_TYPES_H__
+#define __INTEL_DISPLAY_TYPES_H__
#include <linux/async.h>
#include <linux/i2c.h>
@@ -67,8 +68,23 @@ enum intel_output_type {
INTEL_OUTPUT_DP_MST = 11,
};
+enum hdmi_force_audio {
+ HDMI_AUDIO_OFF_DVI = -2, /* no aux data for HDMI-DVI converter */
+ HDMI_AUDIO_OFF, /* force turn off HDMI audio */
+ HDMI_AUDIO_AUTO, /* trust EDID */
+ HDMI_AUDIO_ON, /* force turn on HDMI audio */
+};
+
+/* "Broadcast RGB" property */
+enum intel_broadcast_rgb {
+ INTEL_BROADCAST_RGB_AUTO,
+ INTEL_BROADCAST_RGB_FULL,
+ INTEL_BROADCAST_RGB_LIMITED,
+};
+
struct intel_framebuffer {
struct drm_framebuffer base;
+ struct intel_frontbuffer *frontbuffer;
struct intel_rotation_info rot_info;
/* for each plane in the normal GTT view */
@@ -101,20 +117,30 @@ struct intel_fbdev {
struct mutex hpd_lock;
};
+enum intel_hotplug_state {
+ INTEL_HOTPLUG_UNCHANGED,
+ INTEL_HOTPLUG_CHANGED,
+ INTEL_HOTPLUG_RETRY,
+};
+
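
[Aside: the hotplug() hook now returns a tri-state instead of a bool, so the caller can distinguish "nothing changed" from "changed" from "probe again later". A minimal model of retry-aware dispatch; the scheduling is faked with a loop counter.]

#include <stdio.h>

enum hotplug_state { HPD_UNCHANGED, HPD_CHANGED, HPD_RETRY };

/* First probe reports RETRY (e.g. link not ready yet), second succeeds. */
static enum hotplug_state probe(int attempt)
{
	return attempt == 0 ? HPD_RETRY : HPD_CHANGED;
}

int main(void)
{
	enum hotplug_state state;
	int attempt = 0;

	do {
		state = probe(attempt++);
	} while (state == HPD_RETRY && attempt < 3);

	printf("%s\n", state == HPD_CHANGED ? "changed" : "unchanged");
	return 0;
}
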
struct intel_encoder {
struct drm_encoder base;
enum intel_output_type type;
enum port port;
unsigned int cloneable;
- bool (*hotplug)(struct intel_encoder *encoder,
- struct intel_connector *connector);
+ enum intel_hotplug_state (*hotplug)(struct intel_encoder *encoder,
+ struct intel_connector *connector,
+ bool irq_received);
enum intel_output_type (*compute_output_type)(struct intel_encoder *,
struct intel_crtc_state *,
struct drm_connector_state *);
int (*compute_config)(struct intel_encoder *,
struct intel_crtc_state *,
struct drm_connector_state *);
+ void (*update_prepare)(struct intel_atomic_state *,
+ struct intel_encoder *,
+ struct intel_crtc *);
void (*pre_pll_enable)(struct intel_encoder *,
const struct intel_crtc_state *,
const struct drm_connector_state *);
@@ -124,6 +150,9 @@ struct intel_encoder {
void (*enable)(struct intel_encoder *,
const struct intel_crtc_state *,
const struct drm_connector_state *);
+ void (*update_complete)(struct intel_atomic_state *,
+ struct intel_encoder *,
+ struct intel_crtc *);
void (*disable)(struct intel_encoder *,
const struct intel_crtc_state *,
const struct drm_connector_state *);
@@ -812,6 +841,15 @@ struct intel_crtc_state {
/* Actual register state of the dpll, for shared dpll cross-checking. */
struct intel_dpll_hw_state dpll_hw_state;
+ /*
+ * ICL reserved DPLLs for the CRTC/port. The active PLL is selected by
+ * setting shared_dpll and dpll_hw_state to one of these reserved ones.
+ */
+ struct icl_port_dpll {
+ struct intel_shared_dpll *pll;
+ struct intel_dpll_hw_state hw_state;
+ } icl_port_dplls[ICL_PORT_DPLL_COUNT];
+
/* DSI PLL registers */
struct {
u32 ctrl, div;
@@ -1224,8 +1262,13 @@ struct intel_digital_port {
/* Used for DP and ICL+ TypeC/DP and TypeC/HDMI ports. */
enum aux_ch aux_ch;
enum intel_display_power_domain ddi_io_power_domain;
+ struct mutex tc_lock; /* protects the TypeC port mode */
+ intel_wakeref_t tc_lock_wakeref;
+ int tc_link_refcount;
bool tc_legacy_port:1;
- enum tc_port_type tc_type;
+ char tc_port_name[8];
+ enum tc_port_mode tc_mode;
+ enum phy_fia tc_phy_fia;
void (*write_infoframe)(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
@@ -1446,41 +1489,6 @@ intel_atomic_get_new_crtc_state(struct intel_atomic_state *state,
}
/* intel_display.c */
-void intel_plane_destroy(struct drm_plane *plane);
-void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe);
-void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe);
-enum pipe intel_crtc_pch_transcoder(struct intel_crtc *crtc);
-int vlv_get_hpll_vco(struct drm_i915_private *dev_priv);
-int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
- const char *name, u32 reg, int ref_freq);
-int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv,
- const char *name, u32 reg);
-void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv);
-void lpt_disable_iclkip(struct drm_i915_private *dev_priv);
-void intel_init_display_hooks(struct drm_i915_private *dev_priv);
-unsigned int intel_fb_xy_to_linear(int x, int y,
- const struct intel_plane_state *state,
- int plane);
-unsigned int intel_fb_align_height(const struct drm_framebuffer *fb,
- int color_plane, unsigned int height);
-void intel_add_fb_offsets(int *x, int *y,
- const struct intel_plane_state *state, int plane);
-unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info);
-unsigned int intel_remapped_info_size(const struct intel_remapped_info *rem_info);
-bool intel_has_pending_fb_unpin(struct drm_i915_private *dev_priv);
-int intel_display_suspend(struct drm_device *dev);
-void intel_pps_unlock_regs_wa(struct drm_i915_private *dev_priv);
-void intel_encoder_destroy(struct drm_encoder *encoder);
-struct drm_display_mode *
-intel_encoder_current_mode(struct intel_encoder *encoder);
-bool intel_port_is_combophy(struct drm_i915_private *dev_priv, enum port port);
-bool intel_port_is_tc(struct drm_i915_private *dev_priv, enum port port);
-enum tc_port intel_port_to_tc(struct drm_i915_private *dev_priv,
- enum port port);
-int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
- enum pipe pipe);
static inline bool
intel_crtc_has_type(const struct intel_crtc_state *crtc_state,
enum intel_output_type type)
@@ -1509,108 +1517,9 @@ intel_wait_for_vblank_if_active(struct drm_i915_private *dev_priv, int pipe)
intel_wait_for_vblank(dev_priv, pipe);
}
-u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc);
-
-int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp);
-void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
- struct intel_digital_port *dport,
- unsigned int expected_mask);
-int intel_get_load_detect_pipe(struct drm_connector *connector,
- const struct drm_display_mode *mode,
- struct intel_load_detect_pipe *old,
- struct drm_modeset_acquire_ctx *ctx);
-void intel_release_load_detect_pipe(struct drm_connector *connector,
- struct intel_load_detect_pipe *old,
- struct drm_modeset_acquire_ctx *ctx);
-struct i915_vma *
-intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
- const struct i915_ggtt_view *view,
- bool uses_fence,
- unsigned long *out_flags);
-void intel_unpin_fb_vma(struct i915_vma *vma, unsigned long flags);
-struct drm_framebuffer *
-intel_framebuffer_create(struct drm_i915_gem_object *obj,
- struct drm_mode_fb_cmd2 *mode_cmd);
-int intel_prepare_plane_fb(struct drm_plane *plane,
- struct drm_plane_state *new_state);
-void intel_cleanup_plane_fb(struct drm_plane *plane,
- struct drm_plane_state *old_state);
-
-void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
- enum pipe pipe);
-
-int vlv_force_pll_on(struct drm_i915_private *dev_priv, enum pipe pipe,
- const struct dpll *dpll);
-void vlv_force_pll_off(struct drm_i915_private *dev_priv, enum pipe pipe);
-int lpt_get_iclkip(struct drm_i915_private *dev_priv);
-bool intel_fuzzy_clock_check(int clock1, int clock2);
-
-/* modesetting asserts */
-void assert_panel_unlocked(struct drm_i915_private *dev_priv,
- enum pipe pipe);
-void assert_pll(struct drm_i915_private *dev_priv,
- enum pipe pipe, bool state);
-#define assert_pll_enabled(d, p) assert_pll(d, p, true)
-#define assert_pll_disabled(d, p) assert_pll(d, p, false)
-void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state);
-#define assert_dsi_pll_enabled(d) assert_dsi_pll(d, true)
-#define assert_dsi_pll_disabled(d) assert_dsi_pll(d, false)
-void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
- enum pipe pipe, bool state);
-#define assert_fdi_rx_pll_enabled(d, p) assert_fdi_rx_pll(d, p, true)
-#define assert_fdi_rx_pll_disabled(d, p) assert_fdi_rx_pll(d, p, false)
-void assert_pipe(struct drm_i915_private *dev_priv, enum pipe pipe, bool state);
-#define assert_pipe_enabled(d, p) assert_pipe(d, p, true)
-#define assert_pipe_disabled(d, p) assert_pipe(d, p, false)
-void intel_prepare_reset(struct drm_i915_private *dev_priv);
-void intel_finish_reset(struct drm_i915_private *dev_priv);
-void intel_dp_get_m_n(struct intel_crtc *crtc,
- struct intel_crtc_state *pipe_config);
-void intel_dp_set_m_n(const struct intel_crtc_state *crtc_state,
- enum link_m_n_set m_n);
-void intel_dp_ycbcr_420_enable(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state);
-int intel_dotclock_calculate(int link_freq, const struct intel_link_m_n *m_n);
-bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state,
- struct dpll *best_clock);
-int chv_calc_dpll_params(int refclk, struct dpll *pll_clock);
-
-bool intel_crtc_active(struct intel_crtc *crtc);
-bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state);
-void hsw_enable_ips(const struct intel_crtc_state *crtc_state);
-void hsw_disable_ips(const struct intel_crtc_state *crtc_state);
-enum intel_display_power_domain intel_port_to_power_domain(enum port port);
-enum intel_display_power_domain
-intel_aux_power_domain(struct intel_digital_port *dig_port);
-void intel_mode_from_pipe_config(struct drm_display_mode *mode,
- struct intel_crtc_state *pipe_config);
-void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc,
- struct intel_crtc_state *crtc_state);
-
-u16 skl_scaler_calc_phase(int sub, int scale, bool chroma_center);
-int skl_update_scaler_crtc(struct intel_crtc_state *crtc_state);
-int skl_max_scale(const struct intel_crtc_state *crtc_state,
- u32 pixel_format);
-
static inline u32 intel_plane_ggtt_offset(const struct intel_plane_state *state)
{
return i915_ggtt_offset(state->vma);
}
-u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state);
-u32 glk_plane_color_ctl_crtc(const struct intel_crtc_state *crtc_state);
-u32 skl_plane_ctl(const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state);
-u32 skl_plane_ctl_crtc(const struct intel_crtc_state *crtc_state);
-u32 skl_plane_stride(const struct intel_plane_state *plane_state,
- int plane);
-int skl_check_plane_surface(struct intel_plane_state *plane_state);
-int i9xx_check_plane_surface(struct intel_plane_state *plane_state);
-int skl_format_to_fourcc(int format, bool rgb_order, bool alpha);
-unsigned int i9xx_plane_max_stride(struct intel_plane *plane,
- u32 pixel_format, u64 modifier,
- unsigned int rotation);
-int bdw_get_pipemisc_bpp(struct intel_crtc *crtc);
-
-#endif /* __INTEL_DRV_H__ */
+#endif /* __INTEL_DISPLAY_TYPES_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
index d0fc34826771..921ad0a2f7ba 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp.c
@@ -44,15 +44,16 @@
#include "i915_debugfs.h"
#include "i915_drv.h"
+#include "i915_trace.h"
#include "intel_atomic.h"
#include "intel_audio.h"
#include "intel_connector.h"
#include "intel_ddi.h"
+#include "intel_display_types.h"
#include "intel_dp.h"
#include "intel_dp_link_training.h"
#include "intel_dp_mst.h"
#include "intel_dpio_phy.h"
-#include "intel_drv.h"
#include "intel_fifo_underrun.h"
#include "intel_hdcp.h"
#include "intel_hdmi.h"
@@ -62,6 +63,7 @@
#include "intel_panel.h"
#include "intel_psr.h"
#include "intel_sideband.h"
+#include "intel_tc.h"
#include "intel_vdsc.h"
#define DP_DPRX_ESI_LEN 14
@@ -211,47 +213,13 @@ static int intel_dp_max_common_rate(struct intel_dp *intel_dp)
return intel_dp->common_rates[intel_dp->num_common_rates - 1];
}
-static int intel_dp_get_fia_supported_lane_count(struct intel_dp *intel_dp)
-{
- struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
- enum tc_port tc_port = intel_port_to_tc(dev_priv, dig_port->base.port);
- intel_wakeref_t wakeref;
- u32 lane_info;
-
- if (tc_port == PORT_TC_NONE || dig_port->tc_type != TC_PORT_TYPEC)
- return 4;
-
- lane_info = 0;
- with_intel_display_power(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref)
- lane_info = (I915_READ(PORT_TX_DFLEXDPSP) &
- DP_LANE_ASSIGNMENT_MASK(tc_port)) >>
- DP_LANE_ASSIGNMENT_SHIFT(tc_port);
-
- switch (lane_info) {
- default:
- MISSING_CASE(lane_info);
- /* fall through */
- case 1:
- case 2:
- case 4:
- case 8:
- return 1;
- case 3:
- case 12:
- return 2;
- case 15:
- return 4;
- }
-}
-
/* Theoretical max between source and sink */
static int intel_dp_max_common_lane_count(struct intel_dp *intel_dp)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
int source_max = intel_dig_port->max_lanes;
int sink_max = drm_dp_max_lane_count(intel_dp->dpcd);
- int fia_max = intel_dp_get_fia_supported_lane_count(intel_dp);
+ int fia_max = intel_tc_port_fia_max_lane_count(intel_dig_port);
return min3(source_max, sink_max, fia_max);
}
@@ -330,9 +298,9 @@ static int icl_max_source_rate(struct intel_dp *intel_dp)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
- enum port port = dig_port->base.port;
+ enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port);
- if (intel_port_is_combophy(dev_priv, port) &&
+ if (intel_phy_is_combo(dev_priv, phy) &&
!IS_ELKHARTLAKE(dev_priv) &&
!intel_dp_is_edp(intel_dp))
return 540000;
@@ -1209,7 +1177,7 @@ static u32 skl_get_aux_send_ctl(struct intel_dp *intel_dp,
DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL(32) |
DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
- if (intel_dig_port->tc_type == TC_PORT_TBT)
+ if (intel_dig_port->tc_mode == TC_PORT_TBT_ALT)
ret |= DP_AUX_CH_CTL_TBT_IO;
return ret;
@@ -1225,6 +1193,8 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp,
struct drm_i915_private *i915 =
to_i915(intel_dig_port->base.base.dev);
struct intel_uncore *uncore = &i915->uncore;
+ enum phy phy = intel_port_to_phy(i915, intel_dig_port->base.port);
+ bool is_tc_port = intel_phy_is_tc(i915, phy);
i915_reg_t ch_ctl, ch_data[5];
u32 aux_clock_divider;
enum intel_display_power_domain aux_domain =
@@ -1240,6 +1210,9 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp,
for (i = 0; i < ARRAY_SIZE(ch_data); i++)
ch_data[i] = intel_dp->aux_ch_data_reg(intel_dp, i);
+ if (is_tc_port)
+ intel_tc_port_lock(intel_dig_port);
+
aux_wakeref = intel_display_power_get(i915, aux_domain);
pps_wakeref = pps_lock(intel_dp);
@@ -1392,6 +1365,9 @@ out:
pps_unlock(intel_dp, pps_wakeref);
intel_display_power_put_async(i915, aux_domain, aux_wakeref);
+ if (is_tc_port)
+ intel_tc_port_unlock(intel_dig_port);
+
return ret;
}
@@ -1879,8 +1855,10 @@ intel_dp_compute_link_config_wide(struct intel_dp *intel_dp,
int mode_rate, link_clock, link_avail;
for (bpp = limits->max_bpp; bpp >= limits->min_bpp; bpp -= 2 * 3) {
+ int output_bpp = intel_dp_output_bpp(pipe_config, bpp);
+
mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
- bpp);
+ output_bpp);
for (clock = limits->min_clock; clock <= limits->max_clock; clock++) {
for (lane_count = limits->min_lane_count;
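/*
 * For reference, the bandwidth check in the loop above boils down to the
 * following (a sketch of the helpers' presumed math; intel_dp_output_bpp()
 * raises the effective bpp for YCbCr 4:2:0 outputs):
 *
 *	mode_rate  = DIV_ROUND_UP(crtc_clock * output_bpp, 8);
 *	link_avail = intel_dp_max_data_rate(link_clock, lane_count);
 *
 * and a (clock, lane_count, bpp) combination is usable only if
 * mode_rate <= link_avail.
 */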
@@ -2393,9 +2371,8 @@ static void wait_panel_status(struct intel_dp *intel_dp,
I915_READ(pp_stat_reg),
I915_READ(pp_ctrl_reg));
- if (intel_wait_for_register(&dev_priv->uncore,
- pp_stat_reg, mask, value,
- 5000))
+ if (intel_de_wait_for_register(dev_priv, pp_stat_reg,
+ mask, value, 5000))
DRM_ERROR("Panel status timeout: status %08x control %08x\n",
I915_READ(pp_stat_reg),
I915_READ(pp_ctrl_reg));
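/*
 * All the intel_wait_for_register() conversions in this diff follow the same
 * pattern; assuming the new display helpers reduce to the old call roughly
 * as sketched here:
 *
 *	intel_de_wait_for_set(i915, reg, mask, t)    waits for (reg & mask) == mask
 *	intel_de_wait_for_clear(i915, reg, mask, t)  waits for (reg & mask) == 0
 *
 * with both expressible via intel_de_wait_for_register(i915, reg, mask,
 * value, t), as used above.
 */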
@@ -3982,10 +3959,8 @@ void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
if (port == PORT_A)
return;
- if (intel_wait_for_register(&dev_priv->uncore, DP_TP_STATUS(port),
- DP_TP_STATUS_IDLE_DONE,
- DP_TP_STATUS_IDLE_DONE,
- 1))
+ if (intel_de_wait_for_set(dev_priv, DP_TP_STATUS(port),
+ DP_TP_STATUS_IDLE_DONE, 1))
DRM_ERROR("Timed out waiting for DP idle patterns\n");
}
@@ -4169,10 +4144,6 @@ intel_edp_init_dpcd(struct intel_dp *intel_dp)
drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc,
drm_dp_is_branch(intel_dp->dpcd));
- if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
- dev_priv->no_aux_handshake = intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
- DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
-
/*
* Read the eDP display control registers.
*
@@ -4244,8 +4215,14 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
if (!intel_dp_read_dpcd(intel_dp))
return false;
- /* Don't clobber cached eDP rates. */
+ /*
+ * Don't clobber cached eDP rates. Also skip re-reading
+ * the OUI/ID since we know it won't change.
+ */
if (!intel_dp_is_edp(intel_dp)) {
+ drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc,
+ drm_dp_is_branch(intel_dp->dpcd));
+
intel_dp_set_sink_rates(intel_dp);
intel_dp_set_common_rates(intel_dp);
}
@@ -4254,7 +4231,8 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
* Some eDP panels do not set a valid value for sink count, which is why
* we don't care about reading it here or in intel_edp_init_dpcd().
*/
- if (!intel_dp_is_edp(intel_dp)) {
+ if (!intel_dp_is_edp(intel_dp) &&
+ !drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_NO_SINK_COUNT)) {
u8 count;
ssize_t r;
@@ -4879,14 +4857,16 @@ int intel_dp_retrain_link(struct intel_encoder *encoder,
* retrain the link to get a picture. That's in case no
* userspace component reacted to an intermittent HPD dip.
*/
-static bool intel_dp_hotplug(struct intel_encoder *encoder,
- struct intel_connector *connector)
+static enum intel_hotplug_state
+intel_dp_hotplug(struct intel_encoder *encoder,
+ struct intel_connector *connector,
+ bool irq_received)
{
struct drm_modeset_acquire_ctx ctx;
- bool changed;
+ enum intel_hotplug_state state;
int ret;
- changed = intel_encoder_hotplug(encoder, connector);
+ state = intel_encoder_hotplug(encoder, connector, irq_received);
drm_modeset_acquire_init(&ctx, 0);
@@ -4905,7 +4885,14 @@ static bool intel_dp_hotplug(struct intel_encoder *encoder,
drm_modeset_acquire_fini(&ctx);
WARN(ret, "Acquiring modeset locks failed with %i\n", ret);
- return changed;
+ /*
+ * Keeping it consistent with intel_ddi_hotplug() and
+ * intel_hdmi_hotplug().
+ */
+ if (state == INTEL_HOTPLUG_UNCHANGED && irq_received)
+ state = INTEL_HOTPLUG_RETRY;
+
+ return state;
}
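/*
 * Sketch of the reworked hotplug contract implied above (hypothetical
 * dispatch, enum values taken from this series): handlers now return a
 * tri-state instead of a bool, and INTEL_HOTPLUG_RETRY asks the hotplug work
 * to re-run detection later rather than dropping the event:
 *
 *	switch (encoder->hotplug(encoder, connector, irq_received)) {
 *	case INTEL_HOTPLUG_CHANGED:	// notify userspace
 *	case INTEL_HOTPLUG_RETRY:	// reschedule detection
 *	case INTEL_HOTPLUG_UNCHANGED:	// nothing to do
 *	}
 */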
static void intel_dp_check_service_irq(struct intel_dp *intel_dp)
@@ -5233,204 +5220,16 @@ static bool icl_combo_port_connected(struct drm_i915_private *dev_priv,
return I915_READ(SDEISR) & SDE_DDI_HOTPLUG_ICP(port);
}
-static const char *tc_type_name(enum tc_port_type type)
-{
- static const char * const names[] = {
- [TC_PORT_UNKNOWN] = "unknown",
- [TC_PORT_LEGACY] = "legacy",
- [TC_PORT_TYPEC] = "typec",
- [TC_PORT_TBT] = "tbt",
- };
-
- if (WARN_ON(type >= ARRAY_SIZE(names)))
- type = TC_PORT_UNKNOWN;
-
- return names[type];
-}
-
-static void icl_update_tc_port_type(struct drm_i915_private *dev_priv,
- struct intel_digital_port *intel_dig_port,
- bool is_legacy, bool is_typec, bool is_tbt)
-{
- enum port port = intel_dig_port->base.port;
- enum tc_port_type old_type = intel_dig_port->tc_type;
-
- WARN_ON(is_legacy + is_typec + is_tbt != 1);
-
- if (is_legacy)
- intel_dig_port->tc_type = TC_PORT_LEGACY;
- else if (is_typec)
- intel_dig_port->tc_type = TC_PORT_TYPEC;
- else if (is_tbt)
- intel_dig_port->tc_type = TC_PORT_TBT;
- else
- return;
-
- /* Types are not supposed to be changed at runtime. */
- WARN_ON(old_type != TC_PORT_UNKNOWN &&
- old_type != intel_dig_port->tc_type);
-
- if (old_type != intel_dig_port->tc_type)
- DRM_DEBUG_KMS("Port %c has TC type %s\n", port_name(port),
- tc_type_name(intel_dig_port->tc_type));
-}
-
-/*
- * This function implements the first part of the Connect Flow described by our
- * specification, Gen11 TypeC Programming chapter. The rest of the flow (reading
- * lanes, EDID, etc) is done as needed in the typical places.
- *
- * Unlike the other ports, type-C ports are not available to use as soon as we
- * get a hotplug. The type-C PHYs can be shared between multiple controllers:
- * display, USB, etc. As a result, handshaking through FIA is required around
- * connect and disconnect to cleanly transfer ownership with the controller and
- * set the type-C power state.
- *
- * We could opt to only do the connect flow when we actually try to use the AUX
- * channels or do a modeset, then immediately run the disconnect flow after
- * usage, but there are some implications on this for a dynamic environment:
- * things may go away or change behind our backs. So for now our driver is
- * always trying to acquire ownership of the controller as soon as it gets an
- * interrupt (or polls state and sees a port is connected) and only gives it
- * back when it sees a disconnect. Implementation of a more fine-grained model
- * will require a lot of coordination with user space and thorough testing for
- * the extra possible cases.
- */
-static bool icl_tc_phy_connect(struct drm_i915_private *dev_priv,
- struct intel_digital_port *dig_port)
-{
- enum tc_port tc_port = intel_port_to_tc(dev_priv, dig_port->base.port);
- u32 val;
-
- if (dig_port->tc_type != TC_PORT_LEGACY &&
- dig_port->tc_type != TC_PORT_TYPEC)
- return true;
-
- val = I915_READ(PORT_TX_DFLEXDPPMS);
- if (!(val & DP_PHY_MODE_STATUS_COMPLETED(tc_port))) {
- DRM_DEBUG_KMS("DP PHY for TC port %d not ready\n", tc_port);
- WARN_ON(dig_port->tc_legacy_port);
- return false;
- }
-
- /*
- * This function may be called many times in a row without an HPD event
- * in between, so try to avoid the write when we can.
- */
- val = I915_READ(PORT_TX_DFLEXDPCSSS);
- if (!(val & DP_PHY_MODE_STATUS_NOT_SAFE(tc_port))) {
- val |= DP_PHY_MODE_STATUS_NOT_SAFE(tc_port);
- I915_WRITE(PORT_TX_DFLEXDPCSSS, val);
- }
-
- /*
- * Now we have to re-check the live state, in case the port recently
- * became disconnected. Not necessary for legacy mode.
- */
- if (dig_port->tc_type == TC_PORT_TYPEC &&
- !(I915_READ(PORT_TX_DFLEXDPSP) & TC_LIVE_STATE_TC(tc_port))) {
- DRM_DEBUG_KMS("TC PHY %d sudden disconnect.\n", tc_port);
- icl_tc_phy_disconnect(dev_priv, dig_port);
- return false;
- }
-
- return true;
-}
-
-/*
- * See the comment at the connect function. This implements the Disconnect
- * Flow.
- */
-void icl_tc_phy_disconnect(struct drm_i915_private *dev_priv,
- struct intel_digital_port *dig_port)
-{
- enum tc_port tc_port = intel_port_to_tc(dev_priv, dig_port->base.port);
-
- if (dig_port->tc_type == TC_PORT_UNKNOWN)
- return;
-
- /*
- * The TBT disconnect flow just reads the live status, which the caller
- * already did.
- */
- if (dig_port->tc_type == TC_PORT_TYPEC ||
- dig_port->tc_type == TC_PORT_LEGACY) {
- u32 val;
-
- val = I915_READ(PORT_TX_DFLEXDPCSSS);
- val &= ~DP_PHY_MODE_STATUS_NOT_SAFE(tc_port);
- I915_WRITE(PORT_TX_DFLEXDPCSSS, val);
- }
-
- DRM_DEBUG_KMS("Port %c TC type %s disconnected\n",
- port_name(dig_port->base.port),
- tc_type_name(dig_port->tc_type));
-
- dig_port->tc_type = TC_PORT_UNKNOWN;
-}
-
-/*
- * The type-C ports are different because even when they are connected, they may
- * not be available/usable by the graphics driver: see the comment on
- * icl_tc_phy_connect(). So in our driver instead of adding the additional
- * concept of "usable" and make everything check for "connected and usable" we
- * define a port as "connected" when it is not only connected, but also when it
- * is usable by the rest of the driver. That maintains the old assumption that
- * connected ports are usable, and avoids exposing to the users objects they
- * can't really use.
- */
-static bool icl_tc_port_connected(struct drm_i915_private *dev_priv,
- struct intel_digital_port *intel_dig_port)
-{
- enum port port = intel_dig_port->base.port;
- enum tc_port tc_port = intel_port_to_tc(dev_priv, port);
- bool is_legacy, is_typec, is_tbt;
- u32 dpsp;
-
- /*
- * Complain if we got a legacy port HPD, but VBT didn't mark the port as
- * legacy. Treat the port as legacy from now on.
- */
- if (!intel_dig_port->tc_legacy_port &&
- I915_READ(SDEISR) & SDE_TC_HOTPLUG_ICP(tc_port)) {
- DRM_ERROR("VBT incorrectly claims port %c is not TypeC legacy\n",
- port_name(port));
- intel_dig_port->tc_legacy_port = true;
- }
- is_legacy = intel_dig_port->tc_legacy_port;
-
- /*
- * The spec says we shouldn't be using the ISR bits for detecting
- * between TC and TBT. We should use DFLEXDPSP.
- */
- dpsp = I915_READ(PORT_TX_DFLEXDPSP);
- is_typec = dpsp & TC_LIVE_STATE_TC(tc_port);
- is_tbt = dpsp & TC_LIVE_STATE_TBT(tc_port);
-
- if (!is_legacy && !is_typec && !is_tbt) {
- icl_tc_phy_disconnect(dev_priv, intel_dig_port);
-
- return false;
- }
-
- icl_update_tc_port_type(dev_priv, intel_dig_port, is_legacy, is_typec,
- is_tbt);
-
- if (!icl_tc_phy_connect(dev_priv, intel_dig_port))
- return false;
-
- return true;
-}
-
static bool icl_digital_port_connected(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
+ enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
- if (intel_port_is_combophy(dev_priv, encoder->port))
+ if (intel_phy_is_combo(dev_priv, phy))
return icl_combo_port_connected(dev_priv, dig_port);
- else if (intel_port_is_tc(dev_priv, encoder->port))
- return icl_tc_port_connected(dev_priv, dig_port);
+ else if (intel_phy_is_tc(dev_priv, phy))
+ return intel_tc_port_connected(dig_port);
else
MISSING_CASE(encoder->hpd_pin);
@@ -5588,9 +5387,6 @@ intel_dp_detect(struct drm_connector *connector,
if (INTEL_GEN(dev_priv) >= 11)
intel_dp_get_dsc_sink_cap(intel_dp);
- drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc,
- drm_dp_is_branch(intel_dp->dpcd));
-
intel_dp_configure_mst(intel_dp);
if (intel_dp->is_mst) {
@@ -6016,47 +5812,49 @@ struct hdcp2_dp_errata_stream_type {
u8 stream_type;
} __packed;
-static struct hdcp2_dp_msg_data {
+struct hdcp2_dp_msg_data {
u8 msg_id;
u32 offset;
bool msg_detectable;
u32 timeout;
u32 timeout2; /* Added for non_paired situation */
- } hdcp2_msg_data[] = {
- {HDCP_2_2_AKE_INIT, DP_HDCP_2_2_AKE_INIT_OFFSET, false, 0, 0},
- {HDCP_2_2_AKE_SEND_CERT, DP_HDCP_2_2_AKE_SEND_CERT_OFFSET,
- false, HDCP_2_2_CERT_TIMEOUT_MS, 0},
- {HDCP_2_2_AKE_NO_STORED_KM, DP_HDCP_2_2_AKE_NO_STORED_KM_OFFSET,
- false, 0, 0},
- {HDCP_2_2_AKE_STORED_KM, DP_HDCP_2_2_AKE_STORED_KM_OFFSET,
- false, 0, 0},
- {HDCP_2_2_AKE_SEND_HPRIME, DP_HDCP_2_2_AKE_SEND_HPRIME_OFFSET,
- true, HDCP_2_2_HPRIME_PAIRED_TIMEOUT_MS,
- HDCP_2_2_HPRIME_NO_PAIRED_TIMEOUT_MS},
- {HDCP_2_2_AKE_SEND_PAIRING_INFO,
- DP_HDCP_2_2_AKE_SEND_PAIRING_INFO_OFFSET, true,
- HDCP_2_2_PAIRING_TIMEOUT_MS, 0},
- {HDCP_2_2_LC_INIT, DP_HDCP_2_2_LC_INIT_OFFSET, false, 0, 0},
- {HDCP_2_2_LC_SEND_LPRIME, DP_HDCP_2_2_LC_SEND_LPRIME_OFFSET,
- false, HDCP_2_2_DP_LPRIME_TIMEOUT_MS, 0},
- {HDCP_2_2_SKE_SEND_EKS, DP_HDCP_2_2_SKE_SEND_EKS_OFFSET, false,
- 0, 0},
- {HDCP_2_2_REP_SEND_RECVID_LIST,
- DP_HDCP_2_2_REP_SEND_RECVID_LIST_OFFSET, true,
- HDCP_2_2_RECVID_LIST_TIMEOUT_MS, 0},
- {HDCP_2_2_REP_SEND_ACK, DP_HDCP_2_2_REP_SEND_ACK_OFFSET, false,
- 0, 0},
- {HDCP_2_2_REP_STREAM_MANAGE,
- DP_HDCP_2_2_REP_STREAM_MANAGE_OFFSET, false,
- 0, 0},
- {HDCP_2_2_REP_STREAM_READY, DP_HDCP_2_2_REP_STREAM_READY_OFFSET,
- false, HDCP_2_2_STREAM_READY_TIMEOUT_MS, 0},
+};
+
+static const struct hdcp2_dp_msg_data hdcp2_dp_msg_data[] = {
+ { HDCP_2_2_AKE_INIT, DP_HDCP_2_2_AKE_INIT_OFFSET, false, 0, 0 },
+ { HDCP_2_2_AKE_SEND_CERT, DP_HDCP_2_2_AKE_SEND_CERT_OFFSET,
+ false, HDCP_2_2_CERT_TIMEOUT_MS, 0 },
+ { HDCP_2_2_AKE_NO_STORED_KM, DP_HDCP_2_2_AKE_NO_STORED_KM_OFFSET,
+ false, 0, 0 },
+ { HDCP_2_2_AKE_STORED_KM, DP_HDCP_2_2_AKE_STORED_KM_OFFSET,
+ false, 0, 0 },
+ { HDCP_2_2_AKE_SEND_HPRIME, DP_HDCP_2_2_AKE_SEND_HPRIME_OFFSET,
+ true, HDCP_2_2_HPRIME_PAIRED_TIMEOUT_MS,
+ HDCP_2_2_HPRIME_NO_PAIRED_TIMEOUT_MS },
+ { HDCP_2_2_AKE_SEND_PAIRING_INFO,
+ DP_HDCP_2_2_AKE_SEND_PAIRING_INFO_OFFSET, true,
+ HDCP_2_2_PAIRING_TIMEOUT_MS, 0 },
+ { HDCP_2_2_LC_INIT, DP_HDCP_2_2_LC_INIT_OFFSET, false, 0, 0 },
+ { HDCP_2_2_LC_SEND_LPRIME, DP_HDCP_2_2_LC_SEND_LPRIME_OFFSET,
+ false, HDCP_2_2_DP_LPRIME_TIMEOUT_MS, 0 },
+ { HDCP_2_2_SKE_SEND_EKS, DP_HDCP_2_2_SKE_SEND_EKS_OFFSET, false,
+ 0, 0 },
+ { HDCP_2_2_REP_SEND_RECVID_LIST,
+ DP_HDCP_2_2_REP_SEND_RECVID_LIST_OFFSET, true,
+ HDCP_2_2_RECVID_LIST_TIMEOUT_MS, 0 },
+ { HDCP_2_2_REP_SEND_ACK, DP_HDCP_2_2_REP_SEND_ACK_OFFSET, false,
+ 0, 0 },
+ { HDCP_2_2_REP_STREAM_MANAGE,
+ DP_HDCP_2_2_REP_STREAM_MANAGE_OFFSET, false,
+ 0, 0 },
+ { HDCP_2_2_REP_STREAM_READY, DP_HDCP_2_2_REP_STREAM_READY_OFFSET,
+ false, HDCP_2_2_STREAM_READY_TIMEOUT_MS, 0 },
/* local define to shovel this through the write_2_2 interface */
#define HDCP_2_2_ERRATA_DP_STREAM_TYPE 50
- {HDCP_2_2_ERRATA_DP_STREAM_TYPE,
- DP_HDCP_2_2_REG_STREAM_TYPE_OFFSET, false,
- 0, 0},
- };
+ { HDCP_2_2_ERRATA_DP_STREAM_TYPE,
+ DP_HDCP_2_2_REG_STREAM_TYPE_OFFSET, false,
+ 0, 0 },
+};
static inline
int intel_dp_hdcp2_read_rx_status(struct intel_digital_port *intel_dig_port,
@@ -6110,7 +5908,7 @@ int hdcp2_detect_msg_availability(struct intel_digital_port *intel_dig_port,
static ssize_t
intel_dp_hdcp2_wait_for_msg(struct intel_digital_port *intel_dig_port,
- struct hdcp2_dp_msg_data *hdcp2_msg_data)
+ const struct hdcp2_dp_msg_data *hdcp2_msg_data)
{
struct intel_dp *dp = &intel_dig_port->dp;
struct intel_hdcp *hdcp = &dp->attached_connector->hdcp;
@@ -6149,13 +5947,13 @@ intel_dp_hdcp2_wait_for_msg(struct intel_digital_port *intel_dig_port,
return ret;
}
-static struct hdcp2_dp_msg_data *get_hdcp2_dp_msg_data(u8 msg_id)
+static const struct hdcp2_dp_msg_data *get_hdcp2_dp_msg_data(u8 msg_id)
{
int i;
- for (i = 0; i < ARRAY_SIZE(hdcp2_msg_data); i++)
- if (hdcp2_msg_data[i].msg_id == msg_id)
- return &hdcp2_msg_data[i];
+ for (i = 0; i < ARRAY_SIZE(hdcp2_dp_msg_data); i++)
+ if (hdcp2_dp_msg_data[i].msg_id == msg_id)
+ return &hdcp2_dp_msg_data[i];
return NULL;
}
@@ -6169,7 +5967,7 @@ int intel_dp_hdcp2_write_msg(struct intel_digital_port *intel_dig_port,
unsigned int offset;
u8 *byte = buf;
ssize_t ret, bytes_to_write, len;
- struct hdcp2_dp_msg_data *hdcp2_msg_data;
+ const struct hdcp2_dp_msg_data *hdcp2_msg_data;
hdcp2_msg_data = get_hdcp2_dp_msg_data(*byte);
if (!hdcp2_msg_data)
@@ -6233,7 +6031,7 @@ int intel_dp_hdcp2_read_msg(struct intel_digital_port *intel_dig_port,
unsigned int offset;
u8 *byte = buf;
ssize_t ret, bytes_to_recv, len;
- struct hdcp2_dp_msg_data *hdcp2_msg_data;
+ const struct hdcp2_dp_msg_data *hdcp2_msg_data;
hdcp2_msg_data = get_hdcp2_dp_msg_data(msg_id);
if (!hdcp2_msg_data)
@@ -6835,8 +6633,6 @@ static void intel_dp_set_drrs_state(struct drm_i915_private *dev_priv,
const struct intel_crtc_state *crtc_state,
int refresh_rate)
{
- struct intel_encoder *encoder;
- struct intel_digital_port *dig_port = NULL;
struct intel_dp *intel_dp = dev_priv->drrs.dp;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
enum drrs_refresh_rate_type index = DRRS_HIGH_RR;
@@ -6851,9 +6647,6 @@ static void intel_dp_set_drrs_state(struct drm_i915_private *dev_priv,
return;
}
- dig_port = dp_to_dig_port(intel_dp);
- encoder = &dig_port->base;
-
if (!intel_crtc) {
DRM_DEBUG_KMS("DRRS: intel_crtc not initialized\n");
return;
@@ -7333,6 +7126,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
struct drm_device *dev = intel_encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
enum port port = intel_encoder->port;
+ enum phy phy = intel_port_to_phy(dev_priv, port);
int type;
/* Initialize the work for modeset in case of link train failure */
@@ -7359,7 +7153,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
* Currently we don't support eDP on TypeC ports, although in
* theory it could work on TypeC legacy ports.
*/
- WARN_ON(intel_port_is_tc(dev_priv, port));
+ WARN_ON(intel_phy_is_tc(dev_priv, phy));
type = DRM_MODE_CONNECTOR_eDP;
} else {
type = DRM_MODE_CONNECTOR_DisplayPort;
diff --git a/drivers/gpu/drm/i915/display/intel_dp.h b/drivers/gpu/drm/i915/display/intel_dp.h
index da70b1a41c83..657bbb1f5ed0 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.h
+++ b/drivers/gpu/drm/i915/display/intel_dp.h
@@ -112,8 +112,6 @@ bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp);
int intel_dp_link_required(int pixel_clock, int bpp);
int intel_dp_max_data_rate(int max_link_clock, int max_lanes);
bool intel_digital_port_connected(struct intel_encoder *encoder);
-void icl_tc_phy_disconnect(struct drm_i915_private *dev_priv,
- struct intel_digital_port *dig_port);
static inline unsigned int intel_dp_unused_lane_mask(int lane_count)
{
diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
index 7ded95a334db..020422da2ae2 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
@@ -22,8 +22,8 @@
*
*/
+#include "intel_display_types.h"
#include "intel_dp_aux_backlight.h"
-#include "intel_drv.h"
static void set_aux_backlight_enable(struct intel_dp *intel_dp, bool enable)
{
@@ -264,8 +264,11 @@ intel_dp_aux_display_control_capable(struct intel_connector *connector)
int intel_dp_aux_init_backlight_funcs(struct intel_connector *intel_connector)
{
struct intel_panel *panel = &intel_connector->panel;
+ struct drm_i915_private *dev_priv = to_i915(intel_connector->base.dev);
- if (!i915_modparams.enable_dpcd_backlight)
+ if (i915_modparams.enable_dpcd_backlight == 0 ||
+ (i915_modparams.enable_dpcd_backlight == -1 &&
+ dev_priv->vbt.backlight.type != INTEL_BACKLIGHT_VESA_EDP_AUX_INTERFACE))
return -ENODEV;
if (!intel_dp_aux_display_control_capable(intel_connector))
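/*
 * The modparam is now a tri-state; a hypothetical helper capturing the
 * semantics implied by the check above:
 */
static bool example_want_aux_backlight(int enable_dpcd_backlight,
				       enum intel_backlight_type vbt_type)
{
	if (enable_dpcd_backlight == 0)
		return false;		/* explicitly disabled */
	if (enable_dpcd_backlight == -1)	/* auto: trust the VBT */
		return vbt_type == INTEL_BACKLIGHT_VESA_EDP_AUX_INTERFACE;
	return true;			/* explicitly enabled */
}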
diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.c b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
index 9b1fccea966b..2a1130dd1ad0 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_link_training.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
@@ -21,9 +21,9 @@
* IN THE SOFTWARE.
*/
+#include "intel_display_types.h"
#include "intel_dp.h"
#include "intel_dp_link_training.h"
-#include "intel_drv.h"
static void
intel_dp_dump_link_status(const u8 link_status[DP_LINK_STATUS_SIZE])
diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c
index 8aa6a31e8ad0..6df240a01b8c 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c
@@ -32,10 +32,10 @@
#include "intel_audio.h"
#include "intel_connector.h"
#include "intel_ddi.h"
+#include "intel_display_types.h"
#include "intel_dp.h"
#include "intel_dp_mst.h"
#include "intel_dpio_phy.h"
-#include "intel_drv.h"
static int intel_dp_mst_compute_link_config(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state,
@@ -346,11 +346,8 @@ static void intel_mst_enable_dp(struct intel_encoder *encoder,
DRM_DEBUG_KMS("active links %d\n", intel_dp->active_mst_links);
- if (intel_wait_for_register(&dev_priv->uncore,
- DP_TP_STATUS(port),
- DP_TP_STATUS_ACT_SENT,
- DP_TP_STATUS_ACT_SENT,
- 1))
+ if (intel_de_wait_for_set(dev_priv, DP_TP_STATUS(port),
+ DP_TP_STATUS_ACT_SENT, 1))
DRM_ERROR("Timed out waiting for ACT sent\n");
drm_dp_check_act_status(&intel_dp->mst_mgr);
@@ -618,7 +615,7 @@ intel_dp_create_fake_mst_encoder(struct intel_digital_port *intel_dig_port, enum
intel_encoder->type = INTEL_OUTPUT_DP_MST;
intel_encoder->power_domain = intel_dig_port->base.power_domain;
intel_encoder->port = intel_dig_port->base.port;
- intel_encoder->crtc_mask = 0x7;
+ intel_encoder->crtc_mask = BIT(pipe);
intel_encoder->cloneable = 0;
intel_encoder->compute_config = intel_dp_mst_compute_config;
@@ -648,6 +645,12 @@ intel_dp_create_fake_mst_encoders(struct intel_digital_port *intel_dig_port)
}
int
+intel_dp_mst_encoder_active_links(struct intel_digital_port *intel_dig_port)
+{
+ return intel_dig_port->dp.active_mst_links;
+}
+
+int
intel_dp_mst_encoder_init(struct intel_digital_port *intel_dig_port, int conn_base_id)
{
struct intel_dp *intel_dp = &intel_dig_port->dp;
diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.h b/drivers/gpu/drm/i915/display/intel_dp_mst.h
index 1470c6e0514b..f660ad80db04 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_mst.h
+++ b/drivers/gpu/drm/i915/display/intel_dp_mst.h
@@ -10,5 +10,6 @@ struct intel_digital_port;
int intel_dp_mst_encoder_init(struct intel_digital_port *intel_dig_port, int conn_id);
void intel_dp_mst_encoder_cleanup(struct intel_digital_port *intel_dig_port);
+int intel_dp_mst_encoder_active_links(struct intel_digital_port *intel_dig_port);
#endif /* __INTEL_DP_MST_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_dpio_phy.c b/drivers/gpu/drm/i915/display/intel_dpio_phy.c
index 7ccf7f3974db..556d1b30f06a 100644
--- a/drivers/gpu/drm/i915/display/intel_dpio_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_dpio_phy.c
@@ -23,8 +23,8 @@
#include "display/intel_dp.h"
+#include "intel_display_types.h"
#include "intel_dpio_phy.h"
-#include "intel_drv.h"
#include "intel_sideband.h"
/**
@@ -345,10 +345,8 @@ static u32 bxt_get_grc(struct drm_i915_private *dev_priv, enum dpio_phy phy)
static void bxt_phy_wait_grc_done(struct drm_i915_private *dev_priv,
enum dpio_phy phy)
{
- if (intel_wait_for_register(&dev_priv->uncore,
- BXT_PORT_REF_DW3(phy),
- GRC_DONE, GRC_DONE,
- 10))
+ if (intel_de_wait_for_set(dev_priv, BXT_PORT_REF_DW3(phy),
+ GRC_DONE, 10))
DRM_ERROR("timeout waiting for PHY%d GRC\n", phy);
}
diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
index 2d4e7b9a7b9d..b8148f838354 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
+++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
@@ -21,9 +21,9 @@
* DEALINGS IN THE SOFTWARE.
*/
+#include "intel_display_types.h"
#include "intel_dpio_phy.h"
#include "intel_dpll_mgr.h"
-#include "intel_drv.h"
/**
* DOC: Display PLLs
@@ -36,9 +36,10 @@
* This file provides an abstraction over display PLLs. The function
* intel_shared_dpll_init() initializes the PLLs for the given platform. The
* users of a PLL are tracked and that tracking is integrated with the atomic
- * modest interface. During an atomic operation, a PLL can be requested for a
- * given CRTC and encoder configuration by calling intel_get_shared_dpll() and
- * a previously used PLL can be released with intel_release_shared_dpll().
+ * modeset interface. During an atomic operation, required PLLs can be reserved
+ * for a given CRTC and encoder configuration by calling
+ * intel_reserve_shared_dplls() and previously reserved PLLs can be released
+ * with intel_release_shared_dplls().
* Changes to the users are first staged in the atomic state, and then made
* effective by calling intel_shared_dpll_swap_state() during the atomic
* commit phase.
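/*
 * A hypothetical compute-phase caller under the reworked interface (sketch
 * only, error paths elided; the two entry points are the ones named above):
 */
static int example_crtc_compute_plls(struct intel_atomic_state *state,
				     struct intel_crtc *crtc,
				     struct intel_encoder *encoder)
{
	if (!intel_reserve_shared_dplls(state, crtc, encoder))
		return -EINVAL;	/* no suitable PLL for this configuration */

	return 0;
}

static void example_crtc_release_plls(struct intel_atomic_state *state,
				      struct intel_crtc *crtc)
{
	/* drops the reservations staged in the atomic state */
	intel_release_shared_dplls(state, crtc);
}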
@@ -243,17 +244,18 @@ out:
}
static struct intel_shared_dpll *
-intel_find_shared_dpll(struct intel_crtc_state *crtc_state,
+intel_find_shared_dpll(struct intel_atomic_state *state,
+ const struct intel_crtc *crtc,
+ const struct intel_dpll_hw_state *pll_state,
enum intel_dpll_id range_min,
enum intel_dpll_id range_max)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_shared_dpll *pll, *unused_pll = NULL;
struct intel_shared_dpll_state *shared_dpll;
enum intel_dpll_id i;
- shared_dpll = intel_atomic_get_shared_dpll_state(crtc_state->base.state);
+ shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);
for (i = range_min; i <= range_max; i++) {
pll = &dev_priv->shared_dplls[i];
@@ -265,9 +267,9 @@ intel_find_shared_dpll(struct intel_crtc_state *crtc_state,
continue;
}
- if (memcmp(&crtc_state->dpll_hw_state,
+ if (memcmp(pll_state,
&shared_dpll[i].hw_state,
- sizeof(crtc_state->dpll_hw_state)) == 0) {
+ sizeof(*pll_state)) == 0) {
DRM_DEBUG_KMS("[CRTC:%d:%s] sharing existing %s (crtc mask 0x%08x, active %x)\n",
crtc->base.base.id, crtc->base.name,
pll->info->name,
@@ -289,26 +291,51 @@ intel_find_shared_dpll(struct intel_crtc_state *crtc_state,
}
static void
-intel_reference_shared_dpll(struct intel_shared_dpll *pll,
- struct intel_crtc_state *crtc_state)
+intel_reference_shared_dpll(struct intel_atomic_state *state,
+ const struct intel_crtc *crtc,
+ const struct intel_shared_dpll *pll,
+ const struct intel_dpll_hw_state *pll_state)
{
struct intel_shared_dpll_state *shared_dpll;
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
const enum intel_dpll_id id = pll->info->id;
- shared_dpll = intel_atomic_get_shared_dpll_state(crtc_state->base.state);
+ shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);
if (shared_dpll[id].crtc_mask == 0)
- shared_dpll[id].hw_state =
- crtc_state->dpll_hw_state;
+ shared_dpll[id].hw_state = *pll_state;
- crtc_state->shared_dpll = pll;
DRM_DEBUG_DRIVER("using %s for pipe %c\n", pll->info->name,
pipe_name(crtc->pipe));
shared_dpll[id].crtc_mask |= 1 << crtc->pipe;
}
+static void intel_unreference_shared_dpll(struct intel_atomic_state *state,
+ const struct intel_crtc *crtc,
+ const struct intel_shared_dpll *pll)
+{
+ struct intel_shared_dpll_state *shared_dpll;
+
+ shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);
+ shared_dpll[pll->info->id].crtc_mask &= ~(1 << crtc->pipe);
+}
+
+static void intel_put_dpll(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ const struct intel_crtc_state *old_crtc_state =
+ intel_atomic_get_old_crtc_state(state, crtc);
+ struct intel_crtc_state *new_crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+
+ new_crtc_state->shared_dpll = NULL;
+
+ if (!old_crtc_state->shared_dpll)
+ return;
+
+ intel_unreference_shared_dpll(state, crtc, old_crtc_state->shared_dpll);
+}
+
/**
* intel_shared_dpll_swap_state - make atomic DPLL configuration effective
* @state: atomic state
@@ -320,25 +347,20 @@ intel_reference_shared_dpll(struct intel_shared_dpll *pll,
* i.e. it also puts the current state into @state, even though there is no
* need for that at this moment.
*/
-void intel_shared_dpll_swap_state(struct drm_atomic_state *state)
+void intel_shared_dpll_swap_state(struct intel_atomic_state *state)
{
- struct drm_i915_private *dev_priv = to_i915(state->dev);
- struct intel_shared_dpll_state *shared_dpll;
- struct intel_shared_dpll *pll;
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_shared_dpll_state *shared_dpll = state->shared_dpll;
enum intel_dpll_id i;
- if (!to_intel_atomic_state(state)->dpll_set)
+ if (!state->dpll_set)
return;
- shared_dpll = to_intel_atomic_state(state)->shared_dpll;
for (i = 0; i < dev_priv->num_shared_dpll; i++) {
- struct intel_shared_dpll_state tmp;
+ struct intel_shared_dpll *pll =
+ &dev_priv->shared_dplls[i];
- pll = &dev_priv->shared_dplls[i];
-
- tmp = pll->state;
- pll->state = shared_dpll[i];
- shared_dpll[i] = tmp;
+ swap(pll->state, shared_dpll[i]);
}
}
@@ -421,11 +443,12 @@ static void ibx_pch_dpll_disable(struct drm_i915_private *dev_priv,
udelay(200);
}
-static struct intel_shared_dpll *
-ibx_get_dpll(struct intel_crtc_state *crtc_state,
- struct intel_encoder *encoder)
+static bool ibx_get_dpll(struct intel_atomic_state *state,
+ struct intel_crtc *crtc,
+ struct intel_encoder *encoder)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_shared_dpll *pll;
enum intel_dpll_id i;
@@ -439,18 +462,22 @@ ibx_get_dpll(struct intel_crtc_state *crtc_state,
crtc->base.base.id, crtc->base.name,
pll->info->name);
} else {
- pll = intel_find_shared_dpll(crtc_state,
+ pll = intel_find_shared_dpll(state, crtc,
+ &crtc_state->dpll_hw_state,
DPLL_ID_PCH_PLL_A,
DPLL_ID_PCH_PLL_B);
}
if (!pll)
- return NULL;
+ return false;
/* reference the pll */
- intel_reference_shared_dpll(pll, crtc_state);
+ intel_reference_shared_dpll(state, crtc,
+ pll, &crtc_state->dpll_hw_state);
- return pll;
+ crtc_state->shared_dpll = pll;
+
+ return true;
}
static void ibx_dump_hw_state(struct drm_i915_private *dev_priv,
@@ -767,8 +794,12 @@ hsw_ddi_calculate_wrpll(int clock /* in Hz */,
*r2_out = best.r2;
}
-static struct intel_shared_dpll *hsw_ddi_hdmi_get_dpll(struct intel_crtc_state *crtc_state)
+static struct intel_shared_dpll *
+hsw_ddi_hdmi_get_dpll(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
struct intel_shared_dpll *pll;
u32 val;
unsigned int p, n2, r2;
@@ -781,7 +812,8 @@ static struct intel_shared_dpll *hsw_ddi_hdmi_get_dpll(struct intel_crtc_state *
crtc_state->dpll_hw_state.wrpll = val;
- pll = intel_find_shared_dpll(crtc_state,
+ pll = intel_find_shared_dpll(state, crtc,
+ &crtc_state->dpll_hw_state,
DPLL_ID_WRPLL1, DPLL_ID_WRPLL2);
if (!pll)
@@ -821,38 +853,44 @@ hsw_ddi_dp_get_dpll(struct intel_crtc_state *crtc_state)
return pll;
}
-static struct intel_shared_dpll *
-hsw_get_dpll(struct intel_crtc_state *crtc_state,
- struct intel_encoder *encoder)
+static bool hsw_get_dpll(struct intel_atomic_state *state,
+ struct intel_crtc *crtc,
+ struct intel_encoder *encoder)
{
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
struct intel_shared_dpll *pll;
memset(&crtc_state->dpll_hw_state, 0,
sizeof(crtc_state->dpll_hw_state));
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
- pll = hsw_ddi_hdmi_get_dpll(crtc_state);
+ pll = hsw_ddi_hdmi_get_dpll(state, crtc);
} else if (intel_crtc_has_dp_encoder(crtc_state)) {
pll = hsw_ddi_dp_get_dpll(crtc_state);
} else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) {
if (WARN_ON(crtc_state->port_clock / 2 != 135000))
- return NULL;
+ return false;
crtc_state->dpll_hw_state.spll =
SPLL_PLL_ENABLE | SPLL_FREQ_1350MHz | SPLL_REF_MUXED_SSC;
- pll = intel_find_shared_dpll(crtc_state,
+ pll = intel_find_shared_dpll(state, crtc,
+ &crtc_state->dpll_hw_state,
DPLL_ID_SPLL, DPLL_ID_SPLL);
} else {
- return NULL;
+ return false;
}
if (!pll)
- return NULL;
+ return false;
- intel_reference_shared_dpll(pll, crtc_state);
+ intel_reference_shared_dpll(state, crtc,
+ pll, &crtc_state->dpll_hw_state);
- return pll;
+ crtc_state->shared_dpll = pll;
+
+ return true;
}
static void hsw_dump_hw_state(struct drm_i915_private *dev_priv,
@@ -962,11 +1000,7 @@ static void skl_ddi_pll_enable(struct drm_i915_private *dev_priv,
I915_WRITE(regs[id].ctl,
I915_READ(regs[id].ctl) | LCPLL_PLL_ENABLE);
- if (intel_wait_for_register(&dev_priv->uncore,
- DPLL_STATUS,
- DPLL_LOCK(id),
- DPLL_LOCK(id),
- 5))
+ if (intel_de_wait_for_set(dev_priv, DPLL_STATUS, DPLL_LOCK(id), 5))
DRM_ERROR("DPLL %d not locked\n", id);
}
@@ -1385,10 +1419,12 @@ skl_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
return true;
}
-static struct intel_shared_dpll *
-skl_get_dpll(struct intel_crtc_state *crtc_state,
- struct intel_encoder *encoder)
+static bool skl_get_dpll(struct intel_atomic_state *state,
+ struct intel_crtc *crtc,
+ struct intel_encoder *encoder)
{
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
struct intel_shared_dpll *pll;
bool bret;
@@ -1396,32 +1432,37 @@ skl_get_dpll(struct intel_crtc_state *crtc_state,
bret = skl_ddi_hdmi_pll_dividers(crtc_state);
if (!bret) {
DRM_DEBUG_KMS("Could not get HDMI pll dividers.\n");
- return NULL;
+ return false;
}
} else if (intel_crtc_has_dp_encoder(crtc_state)) {
bret = skl_ddi_dp_set_dpll_hw_state(crtc_state);
if (!bret) {
DRM_DEBUG_KMS("Could not set DP dpll HW state.\n");
- return NULL;
+ return false;
}
} else {
- return NULL;
+ return false;
}
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
- pll = intel_find_shared_dpll(crtc_state,
+ pll = intel_find_shared_dpll(state, crtc,
+ &crtc_state->dpll_hw_state,
DPLL_ID_SKL_DPLL0,
DPLL_ID_SKL_DPLL0);
else
- pll = intel_find_shared_dpll(crtc_state,
+ pll = intel_find_shared_dpll(state, crtc,
+ &crtc_state->dpll_hw_state,
DPLL_ID_SKL_DPLL1,
DPLL_ID_SKL_DPLL3);
if (!pll)
- return NULL;
+ return false;
- intel_reference_shared_dpll(pll, crtc_state);
+ intel_reference_shared_dpll(state, crtc,
+ pll, &crtc_state->dpll_hw_state);
- return pll;
+ crtc_state->shared_dpll = pll;
+
+ return true;
}
static void skl_dump_hw_state(struct drm_i915_private *dev_priv,
@@ -1827,22 +1868,23 @@ bxt_ddi_hdmi_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
return bxt_ddi_set_dpll_hw_state(crtc_state, &clk_div);
}
-static struct intel_shared_dpll *
-bxt_get_dpll(struct intel_crtc_state *crtc_state,
- struct intel_encoder *encoder)
+static bool bxt_get_dpll(struct intel_atomic_state *state,
+ struct intel_crtc *crtc,
+ struct intel_encoder *encoder)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_shared_dpll *pll;
enum intel_dpll_id id;
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) &&
!bxt_ddi_hdmi_set_dpll_hw_state(crtc_state))
- return NULL;
+ return false;
if (intel_crtc_has_dp_encoder(crtc_state) &&
!bxt_ddi_dp_set_dpll_hw_state(crtc_state))
- return NULL;
+ return false;
/* 1:1 mapping between ports and PLLs */
id = (enum intel_dpll_id) encoder->port;
@@ -1851,9 +1893,12 @@ bxt_get_dpll(struct intel_crtc_state *crtc_state,
DRM_DEBUG_KMS("[CRTC:%d:%s] using pre-allocated %s\n",
crtc->base.base.id, crtc->base.name, pll->info->name);
- intel_reference_shared_dpll(pll, crtc_state);
+ intel_reference_shared_dpll(state, crtc,
+ pll, &crtc_state->dpll_hw_state);
- return pll;
+ crtc_state->shared_dpll = pll;
+
+ return true;
}
static void bxt_dump_hw_state(struct drm_i915_private *dev_priv,
@@ -1884,9 +1929,14 @@ static const struct intel_shared_dpll_funcs bxt_ddi_pll_funcs = {
struct intel_dpll_mgr {
const struct dpll_info *dpll_info;
- struct intel_shared_dpll *(*get_dpll)(struct intel_crtc_state *crtc_state,
- struct intel_encoder *encoder);
-
+ bool (*get_dplls)(struct intel_atomic_state *state,
+ struct intel_crtc *crtc,
+ struct intel_encoder *encoder);
+ void (*put_dplls)(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
+ void (*update_active_dpll)(struct intel_atomic_state *state,
+ struct intel_crtc *crtc,
+ struct intel_encoder *encoder);
void (*dump_hw_state)(struct drm_i915_private *dev_priv,
const struct intel_dpll_hw_state *hw_state);
};
@@ -1899,7 +1949,8 @@ static const struct dpll_info pch_plls[] = {
static const struct intel_dpll_mgr pch_pll_mgr = {
.dpll_info = pch_plls,
- .get_dpll = ibx_get_dpll,
+ .get_dplls = ibx_get_dpll,
+ .put_dplls = intel_put_dpll,
.dump_hw_state = ibx_dump_hw_state,
};
@@ -1915,7 +1966,8 @@ static const struct dpll_info hsw_plls[] = {
static const struct intel_dpll_mgr hsw_pll_mgr = {
.dpll_info = hsw_plls,
- .get_dpll = hsw_get_dpll,
+ .get_dplls = hsw_get_dpll,
+ .put_dplls = intel_put_dpll,
.dump_hw_state = hsw_dump_hw_state,
};
@@ -1929,7 +1981,8 @@ static const struct dpll_info skl_plls[] = {
static const struct intel_dpll_mgr skl_pll_mgr = {
.dpll_info = skl_plls,
- .get_dpll = skl_get_dpll,
+ .get_dplls = skl_get_dpll,
+ .put_dplls = intel_put_dpll,
.dump_hw_state = skl_dump_hw_state,
};
@@ -1942,7 +1995,8 @@ static const struct dpll_info bxt_plls[] = {
static const struct intel_dpll_mgr bxt_pll_mgr = {
.dpll_info = bxt_plls,
- .get_dpll = bxt_get_dpll,
+ .get_dplls = bxt_get_dpll,
+ .put_dplls = intel_put_dpll,
.dump_hw_state = bxt_dump_hw_state,
};
@@ -1958,11 +2012,8 @@ static void cnl_ddi_pll_enable(struct drm_i915_private *dev_priv,
I915_WRITE(CNL_DPLL_ENABLE(id), val);
/* 2. Wait for DPLL power state enabled in DPLL_ENABLE. */
- if (intel_wait_for_register(&dev_priv->uncore,
- CNL_DPLL_ENABLE(id),
- PLL_POWER_STATE,
- PLL_POWER_STATE,
- 5))
+ if (intel_de_wait_for_set(dev_priv, CNL_DPLL_ENABLE(id),
+ PLL_POWER_STATE, 5))
DRM_ERROR("PLL %d Power not enabled\n", id);
/*
@@ -1999,11 +2050,7 @@ static void cnl_ddi_pll_enable(struct drm_i915_private *dev_priv,
I915_WRITE(CNL_DPLL_ENABLE(id), val);
/* 7. Wait for PLL lock status in DPLL_ENABLE. */
- if (intel_wait_for_register(&dev_priv->uncore,
- CNL_DPLL_ENABLE(id),
- PLL_LOCK,
- PLL_LOCK,
- 5))
+ if (intel_de_wait_for_set(dev_priv, CNL_DPLL_ENABLE(id), PLL_LOCK, 5))
DRM_ERROR("PLL %d not locked\n", id);
/*
@@ -2047,11 +2094,7 @@ static void cnl_ddi_pll_disable(struct drm_i915_private *dev_priv,
I915_WRITE(CNL_DPLL_ENABLE(id), val);
/* 4. Wait for PLL not locked status in DPLL_ENABLE. */
- if (intel_wait_for_register(&dev_priv->uncore,
- CNL_DPLL_ENABLE(id),
- PLL_LOCK,
- 0,
- 5))
+ if (intel_de_wait_for_clear(dev_priv, CNL_DPLL_ENABLE(id), PLL_LOCK, 5))
DRM_ERROR("PLL %d locked\n", id);
/*
@@ -2069,11 +2112,8 @@ static void cnl_ddi_pll_disable(struct drm_i915_private *dev_priv,
I915_WRITE(CNL_DPLL_ENABLE(id), val);
/* 7. Wait for DPLL power state disabled in DPLL_ENABLE. */
- if (intel_wait_for_register(&dev_priv->uncore,
- CNL_DPLL_ENABLE(id),
- PLL_POWER_STATE,
- 0,
- 5))
+ if (intel_de_wait_for_clear(dev_priv, CNL_DPLL_ENABLE(id),
+ PLL_POWER_STATE, 5))
DRM_ERROR("PLL %d Power not disabled\n", id);
}
@@ -2332,10 +2372,12 @@ cnl_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
return true;
}
-static struct intel_shared_dpll *
-cnl_get_dpll(struct intel_crtc_state *crtc_state,
- struct intel_encoder *encoder)
+static bool cnl_get_dpll(struct intel_atomic_state *state,
+ struct intel_crtc *crtc,
+ struct intel_encoder *encoder)
{
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
struct intel_shared_dpll *pll;
bool bret;
@@ -2343,31 +2385,35 @@ cnl_get_dpll(struct intel_crtc_state *crtc_state,
bret = cnl_ddi_hdmi_pll_dividers(crtc_state);
if (!bret) {
DRM_DEBUG_KMS("Could not get HDMI pll dividers.\n");
- return NULL;
+ return false;
}
} else if (intel_crtc_has_dp_encoder(crtc_state)) {
bret = cnl_ddi_dp_set_dpll_hw_state(crtc_state);
if (!bret) {
DRM_DEBUG_KMS("Could not set DP dpll HW state.\n");
- return NULL;
+ return false;
}
} else {
DRM_DEBUG_KMS("Skip DPLL setup for output_types 0x%x\n",
crtc_state->output_types);
- return NULL;
+ return false;
}
- pll = intel_find_shared_dpll(crtc_state,
+ pll = intel_find_shared_dpll(state, crtc,
+ &crtc_state->dpll_hw_state,
DPLL_ID_SKL_DPLL0,
DPLL_ID_SKL_DPLL2);
if (!pll) {
DRM_DEBUG_KMS("No PLL selected\n");
- return NULL;
+ return false;
}
- intel_reference_shared_dpll(pll, crtc_state);
+ intel_reference_shared_dpll(state, crtc,
+ pll, &crtc_state->dpll_hw_state);
- return pll;
+ crtc_state->shared_dpll = pll;
+
+ return true;
}
static void cnl_dump_hw_state(struct drm_i915_private *dev_priv,
@@ -2394,7 +2440,8 @@ static const struct dpll_info cnl_plls[] = {
static const struct intel_dpll_mgr cnl_pll_mgr = {
.dpll_info = cnl_plls,
- .get_dpll = cnl_get_dpll,
+ .get_dplls = cnl_get_dpll,
+ .put_dplls = intel_put_dpll,
.dump_hw_state = cnl_dump_hw_state,
};
@@ -2506,14 +2553,16 @@ static bool icl_calc_tbt_pll(struct intel_crtc_state *crtc_state,
}
static bool icl_calc_dpll_state(struct intel_crtc_state *crtc_state,
- struct intel_encoder *encoder)
+ struct intel_encoder *encoder,
+ struct intel_dpll_hw_state *pll_state)
{
struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
u32 cfgcr0, cfgcr1;
struct skl_wrpll_params pll_params = { 0 };
bool ret;
- if (intel_port_is_tc(dev_priv, encoder->port))
+ if (intel_phy_is_tc(dev_priv, intel_port_to_phy(dev_priv,
+ encoder->port)))
ret = icl_calc_tbt_pll(crtc_state, &pll_params);
else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) ||
intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
@@ -2530,14 +2579,17 @@ static bool icl_calc_dpll_state(struct intel_crtc_state *crtc_state,
cfgcr1 = DPLL_CFGCR1_QDIV_RATIO(pll_params.qdiv_ratio) |
DPLL_CFGCR1_QDIV_MODE(pll_params.qdiv_mode) |
DPLL_CFGCR1_KDIV(pll_params.kdiv) |
- DPLL_CFGCR1_PDIV(pll_params.pdiv) |
- DPLL_CFGCR1_CENTRAL_FREQ_8400;
+ DPLL_CFGCR1_PDIV(pll_params.pdiv);
- memset(&crtc_state->dpll_hw_state, 0,
- sizeof(crtc_state->dpll_hw_state));
+ if (INTEL_GEN(dev_priv) >= 12)
+ cfgcr1 |= TGL_DPLL_CFGCR1_CFSELOVRD_NORMAL_XTAL;
+ else
+ cfgcr1 |= DPLL_CFGCR1_CENTRAL_FREQ_8400;
- crtc_state->dpll_hw_state.cfgcr0 = cfgcr0;
- crtc_state->dpll_hw_state.cfgcr1 = cfgcr1;
+ memset(pll_state, 0, sizeof(*pll_state));
+
+ pll_state->cfgcr0 = cfgcr0;
+ pll_state->cfgcr1 = cfgcr1;
return true;
}
@@ -2627,10 +2679,10 @@ static bool icl_mg_pll_find_divisors(int clock_khz, bool is_dp, bool use_ssc,
* The specification for this function uses real numbers, so the math had to be
* adapted to integer-only calculation, which is why it looks so different.
*/
-static bool icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state)
+static bool icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state,
+ struct intel_dpll_hw_state *pll_state)
{
struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
- struct intel_dpll_hw_state *pll_state = &crtc_state->dpll_hw_state;
int refclk_khz = dev_priv->cdclk.hw.ref;
int clock = crtc_state->port_clock;
u32 dco_khz, m1div, m2div_int, m2div_rem, m2div_frac;
@@ -2792,63 +2844,184 @@ static bool icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state)
return true;
}
-static struct intel_shared_dpll *
-icl_get_dpll(struct intel_crtc_state *crtc_state,
- struct intel_encoder *encoder)
+/**
+ * icl_set_active_port_dpll - select the active port DPLL for a given CRTC
+ * @crtc_state: state for the CRTC to select the DPLL for
+ * @port_dpll_id: the active @port_dpll_id to select
+ *
+ * Select the given @port_dpll_id instance from the DPLLs reserved for the
+ * CRTC.
+ */
+void icl_set_active_port_dpll(struct intel_crtc_state *crtc_state,
+ enum icl_port_dpll_id port_dpll_id)
{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
- struct intel_digital_port *intel_dig_port;
- struct intel_shared_dpll *pll;
+ struct icl_port_dpll *port_dpll =
+ &crtc_state->icl_port_dplls[port_dpll_id];
+
+ crtc_state->shared_dpll = port_dpll->pll;
+ crtc_state->dpll_hw_state = port_dpll->hw_state;
+}
+
+static void icl_update_active_dpll(struct intel_atomic_state *state,
+ struct intel_crtc *crtc,
+ struct intel_encoder *encoder)
+{
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ struct intel_digital_port *primary_port;
+ enum icl_port_dpll_id port_dpll_id = ICL_PORT_DPLL_DEFAULT;
+
+ primary_port = encoder->type == INTEL_OUTPUT_DP_MST ?
+ enc_to_mst(&encoder->base)->primary :
+ enc_to_dig_port(&encoder->base);
+
+ if (primary_port &&
+ (primary_port->tc_mode == TC_PORT_DP_ALT ||
+ primary_port->tc_mode == TC_PORT_LEGACY))
+ port_dpll_id = ICL_PORT_DPLL_MG_PHY;
+
+ icl_set_active_port_dpll(crtc_state, port_dpll_id);
+}
+
+static bool icl_get_combo_phy_dpll(struct intel_atomic_state *state,
+ struct intel_crtc *crtc,
+ struct intel_encoder *encoder)
+{
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ struct icl_port_dpll *port_dpll =
+ &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum port port = encoder->port;
- enum intel_dpll_id min, max;
- bool ret;
+ bool has_dpll4 = false;
- if (intel_port_is_combophy(dev_priv, port)) {
- min = DPLL_ID_ICL_DPLL0;
- max = DPLL_ID_ICL_DPLL1;
- ret = icl_calc_dpll_state(crtc_state, encoder);
- } else if (intel_port_is_tc(dev_priv, port)) {
- if (encoder->type == INTEL_OUTPUT_DP_MST) {
- struct intel_dp_mst_encoder *mst_encoder;
+ if (!icl_calc_dpll_state(crtc_state, encoder, &port_dpll->hw_state)) {
+ DRM_DEBUG_KMS("Could not calculate combo PHY PLL state.\n");
- mst_encoder = enc_to_mst(&encoder->base);
- intel_dig_port = mst_encoder->primary;
- } else {
- intel_dig_port = enc_to_dig_port(&encoder->base);
- }
+ return false;
+ }
- if (intel_dig_port->tc_type == TC_PORT_TBT) {
- min = DPLL_ID_ICL_TBTPLL;
- max = min;
- ret = icl_calc_dpll_state(crtc_state, encoder);
- } else {
- enum tc_port tc_port;
+ if (IS_ELKHARTLAKE(dev_priv) && port != PORT_A)
+ has_dpll4 = true;
+
+ port_dpll->pll = intel_find_shared_dpll(state, crtc,
+ &port_dpll->hw_state,
+ DPLL_ID_ICL_DPLL0,
+ has_dpll4 ? DPLL_ID_EHL_DPLL4
+ : DPLL_ID_ICL_DPLL1);
+ if (!port_dpll->pll) {
+ DRM_DEBUG_KMS("No combo PHY PLL found for port %c\n",
+ port_name(encoder->port));
+ return false;
+ }
- tc_port = intel_port_to_tc(dev_priv, port);
- min = icl_tc_port_to_pll_id(tc_port);
- max = min;
- ret = icl_calc_mg_pll_state(crtc_state);
- }
- } else {
- MISSING_CASE(port);
- return NULL;
+ intel_reference_shared_dpll(state, crtc,
+ port_dpll->pll, &port_dpll->hw_state);
+
+ icl_update_active_dpll(state, crtc, encoder);
+
+ return true;
+}
+
+static bool icl_get_tc_phy_dplls(struct intel_atomic_state *state,
+ struct intel_crtc *crtc,
+ struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ struct icl_port_dpll *port_dpll;
+ enum intel_dpll_id dpll_id;
+
+ port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
+ if (!icl_calc_dpll_state(crtc_state, encoder, &port_dpll->hw_state)) {
+ DRM_DEBUG_KMS("Could not calculate TBT PLL state.\n");
+ return false;
}
- if (!ret) {
- DRM_DEBUG_KMS("Could not calculate PLL state.\n");
- return NULL;
+ port_dpll->pll = intel_find_shared_dpll(state, crtc,
+ &port_dpll->hw_state,
+ DPLL_ID_ICL_TBTPLL,
+ DPLL_ID_ICL_TBTPLL);
+ if (!port_dpll->pll) {
+ DRM_DEBUG_KMS("No TBT-ALT PLL found\n");
+ return false;
}
+ intel_reference_shared_dpll(state, crtc,
+ port_dpll->pll, &port_dpll->hw_state);
- pll = intel_find_shared_dpll(crtc_state, min, max);
- if (!pll) {
- DRM_DEBUG_KMS("No PLL selected\n");
- return NULL;
+ port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_MG_PHY];
+ if (!icl_calc_mg_pll_state(crtc_state, &port_dpll->hw_state)) {
+ DRM_DEBUG_KMS("Could not calculate MG PHY PLL state.\n");
+ goto err_unreference_tbt_pll;
}
- intel_reference_shared_dpll(pll, crtc_state);
+ dpll_id = icl_tc_port_to_pll_id(intel_port_to_tc(dev_priv,
+ encoder->port));
+ port_dpll->pll = intel_find_shared_dpll(state, crtc,
+ &port_dpll->hw_state,
+ dpll_id,
+ dpll_id);
+ if (!port_dpll->pll) {
+ DRM_DEBUG_KMS("No MG PHY PLL found\n");
+ goto err_unreference_tbt_pll;
+ }
+ intel_reference_shared_dpll(state, crtc,
+ port_dpll->pll, &port_dpll->hw_state);
- return pll;
+ icl_update_active_dpll(state, crtc, encoder);
+
+ return true;
+
+err_unreference_tbt_pll:
+ port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
+ intel_unreference_shared_dpll(state, crtc, port_dpll->pll);
+
+ return false;
+}
+
+static bool icl_get_dplls(struct intel_atomic_state *state,
+ struct intel_crtc *crtc,
+ struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
+
+ if (intel_phy_is_combo(dev_priv, phy))
+ return icl_get_combo_phy_dpll(state, crtc, encoder);
+ else if (intel_phy_is_tc(dev_priv, phy))
+ return icl_get_tc_phy_dplls(state, crtc, encoder);
+
+ MISSING_CASE(phy);
+
+ return false;
+}
+
+static void icl_put_dplls(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ const struct intel_crtc_state *old_crtc_state =
+ intel_atomic_get_old_crtc_state(state, crtc);
+ struct intel_crtc_state *new_crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ enum icl_port_dpll_id id;
+
+ new_crtc_state->shared_dpll = NULL;
+
+ for (id = ICL_PORT_DPLL_DEFAULT; id < ICL_PORT_DPLL_COUNT; id++) {
+ const struct icl_port_dpll *old_port_dpll =
+ &old_crtc_state->icl_port_dplls[id];
+ struct icl_port_dpll *new_port_dpll =
+ &new_crtc_state->icl_port_dplls[id];
+
+ new_port_dpll->pll = NULL;
+
+ if (!old_port_dpll->pll)
+ continue;
+
+ intel_unreference_shared_dpll(state, crtc, old_port_dpll->pll);
+ }
}
static bool mg_pll_get_hw_state(struct drm_i915_private *dev_priv,
@@ -2932,8 +3105,18 @@ static bool icl_pll_get_hw_state(struct drm_i915_private *dev_priv,
if (!(val & PLL_ENABLE))
goto out;
- hw_state->cfgcr0 = I915_READ(ICL_DPLL_CFGCR0(id));
- hw_state->cfgcr1 = I915_READ(ICL_DPLL_CFGCR1(id));
+ if (INTEL_GEN(dev_priv) >= 12) {
+ hw_state->cfgcr0 = I915_READ(TGL_DPLL_CFGCR0(id));
+ hw_state->cfgcr1 = I915_READ(TGL_DPLL_CFGCR1(id));
+ } else {
+ if (IS_ELKHARTLAKE(dev_priv) && id == DPLL_ID_EHL_DPLL4) {
+ hw_state->cfgcr0 = I915_READ(ICL_DPLL_CFGCR0(4));
+ hw_state->cfgcr1 = I915_READ(ICL_DPLL_CFGCR1(4));
+ } else {
+ hw_state->cfgcr0 = I915_READ(ICL_DPLL_CFGCR0(id));
+ hw_state->cfgcr1 = I915_READ(ICL_DPLL_CFGCR1(id));
+ }
+ }
ret = true;
out:
@@ -2945,8 +3128,14 @@ static bool combo_pll_get_hw_state(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll,
struct intel_dpll_hw_state *hw_state)
{
- return icl_pll_get_hw_state(dev_priv, pll, hw_state,
- CNL_DPLL_ENABLE(pll->info->id));
+ i915_reg_t enable_reg = CNL_DPLL_ENABLE(pll->info->id);
+
+ if (IS_ELKHARTLAKE(dev_priv) &&
+ pll->info->id == DPLL_ID_EHL_DPLL4) {
+ enable_reg = MG_PLL_ENABLE(0);
+ }
+
+ return icl_pll_get_hw_state(dev_priv, pll, hw_state, enable_reg);
}
static bool tbt_pll_get_hw_state(struct drm_i915_private *dev_priv,
@@ -2961,10 +3150,24 @@ static void icl_dpll_write(struct drm_i915_private *dev_priv,
{
struct intel_dpll_hw_state *hw_state = &pll->state.hw_state;
const enum intel_dpll_id id = pll->info->id;
+ i915_reg_t cfgcr0_reg, cfgcr1_reg;
- I915_WRITE(ICL_DPLL_CFGCR0(id), hw_state->cfgcr0);
- I915_WRITE(ICL_DPLL_CFGCR1(id), hw_state->cfgcr1);
- POSTING_READ(ICL_DPLL_CFGCR1(id));
+ if (INTEL_GEN(dev_priv) >= 12) {
+ cfgcr0_reg = TGL_DPLL_CFGCR0(id);
+ cfgcr1_reg = TGL_DPLL_CFGCR1(id);
+ } else {
+ if (IS_ELKHARTLAKE(dev_priv) && id == DPLL_ID_EHL_DPLL4) {
+ cfgcr0_reg = ICL_DPLL_CFGCR0(4);
+ cfgcr1_reg = ICL_DPLL_CFGCR1(4);
+ } else {
+ cfgcr0_reg = ICL_DPLL_CFGCR0(id);
+ cfgcr1_reg = ICL_DPLL_CFGCR1(id);
+ }
+ }
+
+ I915_WRITE(cfgcr0_reg, hw_state->cfgcr0);
+ I915_WRITE(cfgcr1_reg, hw_state->cfgcr1);
+ POSTING_READ(cfgcr1_reg);
}
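[Editor's note: the CFGCR register selection above mirrors the one added to icl_pll_get_hw_state() earlier in this patch. A minimal sketch of how the two sites could share one lookup, using only macros already present in the patch — the helper name is hypothetical, not part of the patch:

static void icl_dpll_cfgcr_regs(struct drm_i915_private *dev_priv,
				enum intel_dpll_id id,
				i915_reg_t *cfgcr0, i915_reg_t *cfgcr1)
{
	if (INTEL_GEN(dev_priv) >= 12) {
		/* TGL moved the combo PLL config registers. */
		*cfgcr0 = TGL_DPLL_CFGCR0(id);
		*cfgcr1 = TGL_DPLL_CFGCR1(id);
	} else if (IS_ELKHARTLAKE(dev_priv) && id == DPLL_ID_EHL_DPLL4) {
		/* EHL DPLL4 reuses the 4th ICL register pair. */
		*cfgcr0 = ICL_DPLL_CFGCR0(4);
		*cfgcr1 = ICL_DPLL_CFGCR1(4);
	} else {
		*cfgcr0 = ICL_DPLL_CFGCR0(id);
		*cfgcr1 = ICL_DPLL_CFGCR1(id);
	}
}
]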
static void icl_mg_pll_write(struct drm_i915_private *dev_priv,
@@ -3031,8 +3234,7 @@ static void icl_pll_power_enable(struct drm_i915_private *dev_priv,
* The spec says we need to "wait" but it also says it should be
* immediate.
*/
- if (intel_wait_for_register(&dev_priv->uncore, enable_reg,
- PLL_POWER_STATE, PLL_POWER_STATE, 1))
+ if (intel_de_wait_for_set(dev_priv, enable_reg, PLL_POWER_STATE, 1))
DRM_ERROR("PLL %d Power not enabled\n", pll->info->id);
}
@@ -3047,8 +3249,7 @@ static void icl_pll_enable(struct drm_i915_private *dev_priv,
I915_WRITE(enable_reg, val);
/* Timeout is actually 600us. */
- if (intel_wait_for_register(&dev_priv->uncore, enable_reg,
- PLL_LOCK, PLL_LOCK, 1))
+ if (intel_de_wait_for_set(dev_priv, enable_reg, PLL_LOCK, 1))
DRM_ERROR("PLL %d not locked\n", pll->info->id);
}
@@ -3057,6 +3258,19 @@ static void combo_pll_enable(struct drm_i915_private *dev_priv,
{
i915_reg_t enable_reg = CNL_DPLL_ENABLE(pll->info->id);
+ if (IS_ELKHARTLAKE(dev_priv) &&
+ pll->info->id == DPLL_ID_EHL_DPLL4) {
+ enable_reg = MG_PLL_ENABLE(0);
+
+ /*
+ * We need to disable DC states when this DPLL is enabled.
+ * This can be done by taking a reference on DPLL4 power
+ * domain.
+ */
+ pll->wakeref = intel_display_power_get(dev_priv,
+ POWER_DOMAIN_DPLL_DC_OFF);
+ }
+
icl_pll_power_enable(dev_priv, pll, enable_reg);
icl_dpll_write(dev_priv, pll);
@@ -3130,8 +3344,7 @@ static void icl_pll_disable(struct drm_i915_private *dev_priv,
I915_WRITE(enable_reg, val);
/* Timeout is actually 1us. */
- if (intel_wait_for_register(&dev_priv->uncore,
- enable_reg, PLL_LOCK, 0, 1))
+ if (intel_de_wait_for_clear(dev_priv, enable_reg, PLL_LOCK, 1))
DRM_ERROR("PLL %d locked\n", pll->info->id);
/* DVFS post sequence would be here. See the comment above. */
@@ -3144,15 +3357,26 @@ static void icl_pll_disable(struct drm_i915_private *dev_priv,
* The spec says we need to "wait" but it also says it should be
* immediate.
*/
- if (intel_wait_for_register(&dev_priv->uncore,
- enable_reg, PLL_POWER_STATE, 0, 1))
+ if (intel_de_wait_for_clear(dev_priv, enable_reg, PLL_POWER_STATE, 1))
DRM_ERROR("PLL %d Power not disabled\n", pll->info->id);
}
static void combo_pll_disable(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll)
{
- icl_pll_disable(dev_priv, pll, CNL_DPLL_ENABLE(pll->info->id));
+ i915_reg_t enable_reg = CNL_DPLL_ENABLE(pll->info->id);
+
+ if (IS_ELKHARTLAKE(dev_priv) &&
+ pll->info->id == DPLL_ID_EHL_DPLL4) {
+ enable_reg = MG_PLL_ENABLE(0);
+ icl_pll_disable(dev_priv, pll, enable_reg);
+
+ intel_display_power_put(dev_priv, POWER_DOMAIN_DPLL_DC_OFF,
+ pll->wakeref);
+ return;
+ }
+
+ icl_pll_disable(dev_priv, pll, enable_reg);
}
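[Editor's note: combo_pll_enable(), combo_pll_disable() and combo_pll_get_hw_state() all repeat the same EHL DPLL4 test before swapping in MG_PLL_ENABLE(0). A sketch of a shared predicate, assuming the structures shown in this patch — the helper name is hypothetical:

static bool ehl_pll_is_dpll4(struct drm_i915_private *dev_priv,
			     const struct intel_shared_dpll *pll)
{
	/* EHL DPLL4 is driven through the MG_PLL_ENABLE(0) register. */
	return IS_ELKHARTLAKE(dev_priv) &&
	       pll->info->id == DPLL_ID_EHL_DPLL4;
}
]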
static void tbt_pll_disable(struct drm_i915_private *dev_priv,
@@ -3223,19 +3447,38 @@ static const struct dpll_info icl_plls[] = {
static const struct intel_dpll_mgr icl_pll_mgr = {
.dpll_info = icl_plls,
- .get_dpll = icl_get_dpll,
+ .get_dplls = icl_get_dplls,
+ .put_dplls = icl_put_dplls,
+ .update_active_dpll = icl_update_active_dpll,
.dump_hw_state = icl_dump_hw_state,
};
static const struct dpll_info ehl_plls[] = {
{ "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 },
{ "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 },
+ { "DPLL 4", &combo_pll_funcs, DPLL_ID_EHL_DPLL4, 0 },
{ },
};
static const struct intel_dpll_mgr ehl_pll_mgr = {
.dpll_info = ehl_plls,
- .get_dpll = icl_get_dpll,
+ .get_dplls = icl_get_dplls,
+ .put_dplls = icl_put_dplls,
+ .dump_hw_state = icl_dump_hw_state,
+};
+
+static const struct dpll_info tgl_plls[] = {
+ { "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 },
+ { "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 },
+ { "TBT PLL", &tbt_pll_funcs, DPLL_ID_ICL_TBTPLL, 0 },
+ /* TODO: Add Type-C PLLs */
+ { },
+};
+
+static const struct intel_dpll_mgr tgl_pll_mgr = {
+ .dpll_info = tgl_plls,
+ .get_dplls = icl_get_dplls,
+ .put_dplls = icl_put_dplls,
.dump_hw_state = icl_dump_hw_state,
};
@@ -3252,7 +3495,9 @@ void intel_shared_dpll_init(struct drm_device *dev)
const struct dpll_info *dpll_info;
int i;
- if (IS_ELKHARTLAKE(dev_priv))
+ if (INTEL_GEN(dev_priv) >= 12)
+ dpll_mgr = &tgl_pll_mgr;
+ else if (IS_ELKHARTLAKE(dev_priv))
dpll_mgr = &ehl_pll_mgr;
else if (INTEL_GEN(dev_priv) >= 11)
dpll_mgr = &icl_pll_mgr;
@@ -3287,50 +3532,87 @@ void intel_shared_dpll_init(struct drm_device *dev)
}
/**
- * intel_get_shared_dpll - get a shared DPLL for CRTC and encoder combination
- * @crtc_state: atomic state for the crtc
+ * intel_reserve_shared_dplls - reserve DPLLs for CRTC and encoder combination
+ * @state: atomic state
+ * @crtc: CRTC to reserve DPLLs for
* @encoder: encoder
*
- * Find an appropriate DPLL for the given CRTC and encoder combination. A
- * reference from the @crtc_state to the returned pll is registered in the
- * atomic state. That configuration is made effective by calling
- * intel_shared_dpll_swap_state(). The reference should be released by calling
- * intel_release_shared_dpll().
+ * This function reserves all required DPLLs for the given CRTC and encoder
+ * combination in the current atomic commit @state and the new @crtc atomic
+ * state.
+ *
+ * The new configuration in the atomic commit @state is made effective by
+ * calling intel_shared_dpll_swap_state().
+ *
+ * The reserved DPLLs should be released by calling
+ * intel_release_shared_dplls().
*
* Returns:
- * A shared DPLL to be used by @crtc_state and @encoder.
+ * True if all required DPLLs were successfully reserved.
*/
-struct intel_shared_dpll *
-intel_get_shared_dpll(struct intel_crtc_state *crtc_state,
- struct intel_encoder *encoder)
+bool intel_reserve_shared_dplls(struct intel_atomic_state *state,
+ struct intel_crtc *crtc,
+ struct intel_encoder *encoder)
{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll_mgr;
if (WARN_ON(!dpll_mgr))
- return NULL;
+ return false;
- return dpll_mgr->get_dpll(crtc_state, encoder);
+ return dpll_mgr->get_dplls(state, crtc, encoder);
}
/**
- * intel_release_shared_dpll - end use of DPLL by CRTC in atomic state
- * @dpll: dpll in use by @crtc
- * @crtc: crtc
+ * intel_release_shared_dplls - end use of DPLLs by CRTC in atomic state
* @state: atomic state
+ * @crtc: crtc from which the DPLLs are to be released
*
- * This function releases the reference from @crtc to @dpll from the
- * atomic @state. The new configuration is made effective by calling
- * intel_shared_dpll_swap_state().
+ * This function releases all DPLLs reserved by intel_reserve_shared_dplls()
+ * from the current atomic commit @state and the old @crtc atomic state.
+ *
+ * The new configuration in the atomic commit @state is made effective by
+ * calling intel_shared_dpll_swap_state().
*/
-void intel_release_shared_dpll(struct intel_shared_dpll *dpll,
- struct intel_crtc *crtc,
- struct drm_atomic_state *state)
+void intel_release_shared_dplls(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
- struct intel_shared_dpll_state *shared_dpll_state;
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll_mgr;
+
+ /*
+ * FIXME: this function is called for every platform having a
+ * compute_clock hook, even on platforms that don't yet support the
+ * shared DPLL framework, where intel_reserve_shared_dplls() is never
+ * called.
+ */
+ if (!dpll_mgr)
+ return;
+
+ dpll_mgr->put_dplls(state, crtc);
+}
+
+/**
+ * intel_update_active_dpll - update the active DPLL for a CRTC/encoder
+ * @state: atomic state
+ * @crtc: the CRTC for which to update the active DPLL
+ * @encoder: encoder determining the type of port DPLL
+ *
+ * Update the active DPLL for the given @crtc/@encoder in @crtc's atomic state,
+ * from the port DPLLs reserved previously by intel_reserve_shared_dplls(). The
+ * DPLL selected will be based on the current mode of the encoder's port.
+ */
+void intel_update_active_dpll(struct intel_atomic_state *state,
+ struct intel_crtc *crtc,
+ struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll_mgr;
+
+ if (WARN_ON(!dpll_mgr))
+ return;
- shared_dpll_state = intel_atomic_get_shared_dpll_state(state);
- shared_dpll_state[dpll->info->id].crtc_mask &= ~(1 << crtc->pipe);
+ dpll_mgr->update_active_dpll(state, crtc, encoder);
}
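[Editor's note: a sketch of the intended calling sequence for the reworked API, assuming a platform clock-compute path like the one described in the kernel-doc above — the wrapper function is illustrative, not part of the patch:

static int example_compute_port_clock(struct intel_atomic_state *state,
				      struct intel_crtc *crtc,
				      struct intel_encoder *encoder)
{
	/* Reserve every DPLL the port may need (e.g. TBT + MG on TC). */
	if (!intel_reserve_shared_dplls(state, crtc, encoder))
		return -EINVAL;

	/*
	 * If the TC port mode changes later, the active PLL can be
	 * switched among the already reserved ones without recompute:
	 */
	intel_update_active_dpll(state, crtc, encoder);

	/*
	 * The commit path makes the reservations effective with
	 * intel_shared_dpll_swap_state(); teardown undoes them with
	 * intel_release_shared_dplls().
	 */
	return 0;
}
]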
/**
diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.h b/drivers/gpu/drm/i915/display/intel_dpll_mgr.h
index d0570414f3d1..e7588799fce5 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.h
+++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.h
@@ -28,6 +28,7 @@
#include <linux/types.h>
#include "intel_display.h"
+#include "intel_wakeref.h"
/* FIXME: Move this to a more appropriate place. */
#define abs_diff(a, b) ({ \
@@ -36,9 +37,9 @@
(void) (&__a == &__b); \
__a > __b ? (__a - __b) : (__b - __a); })
-struct drm_atomic_state;
struct drm_device;
struct drm_i915_private;
+struct intel_atomic_state;
struct intel_crtc;
struct intel_crtc_state;
struct intel_encoder;
@@ -110,35 +111,59 @@ enum intel_dpll_id {
/**
- * @DPLL_ID_ICL_DPLL0: ICL combo PHY DPLL0
+ * @DPLL_ID_ICL_DPLL0: ICL/TGL combo PHY DPLL0
*/
DPLL_ID_ICL_DPLL0 = 0,
/**
- * @DPLL_ID_ICL_DPLL1: ICL combo PHY DPLL1
+ * @DPLL_ID_ICL_DPLL1: ICL/TGL combo PHY DPLL1
*/
DPLL_ID_ICL_DPLL1 = 1,
/**
- * @DPLL_ID_ICL_TBTPLL: ICL TBT PLL
+ * @DPLL_ID_EHL_DPLL4: EHL combo PHY DPLL4 (shares its numeric value
+ * with @DPLL_ID_ICL_TBTPLL; the two IDs never coexist on one platform)
+ */
+ DPLL_ID_EHL_DPLL4 = 2,
+ /**
+ * @DPLL_ID_ICL_TBTPLL: ICL/TGL TBT PLL
*/
DPLL_ID_ICL_TBTPLL = 2,
/**
- * @DPLL_ID_ICL_MGPLL1: ICL MG PLL 1 port 1 (C)
+ * @DPLL_ID_ICL_MGPLL1: ICL MG PLL 1 port 1 (C),
+ * TGL TC PLL 1 port 1 (TC1)
*/
DPLL_ID_ICL_MGPLL1 = 3,
/**
* @DPLL_ID_ICL_MGPLL2: ICL MG PLL 1 port 2 (D)
+ * TGL TC PLL 1 port 2 (TC2)
*/
DPLL_ID_ICL_MGPLL2 = 4,
/**
* @DPLL_ID_ICL_MGPLL3: ICL MG PLL 1 port 3 (E)
+ * TGL TC PLL 1 port 3 (TC3)
*/
DPLL_ID_ICL_MGPLL3 = 5,
/**
* @DPLL_ID_ICL_MGPLL4: ICL MG PLL 1 port 4 (F)
+ * TGL TC PLL 1 port 4 (TC4)
*/
DPLL_ID_ICL_MGPLL4 = 6,
+ /**
+ * @DPLL_ID_TGL_MGPLL5: TGL TC PLL port 5 (TC5)
+ */
+ DPLL_ID_TGL_MGPLL5 = 7,
+ /**
+ * @DPLL_ID_TGL_MGPLL6: TGL TC PLL port 6 (TC6)
+ */
+ DPLL_ID_TGL_MGPLL6 = 8,
+};
+
+#define I915_NUM_PLLS 9
+
+enum icl_port_dpll_id {
+ ICL_PORT_DPLL_DEFAULT,
+ ICL_PORT_DPLL_MG_PHY,
+
+ ICL_PORT_DPLL_COUNT,
};
-#define I915_NUM_PLLS 7
struct intel_dpll_hw_state {
/* i9xx, pch plls */
@@ -195,7 +220,7 @@ struct intel_dpll_hw_state {
* future state which would be applied by an atomic mode set (stored in
* a struct &intel_atomic_state).
*
- * See also intel_get_shared_dpll() and intel_release_shared_dpll().
+ * See also intel_reserve_shared_dplls() and intel_release_shared_dplls().
*/
struct intel_shared_dpll_state {
/**
@@ -312,6 +337,7 @@ struct intel_shared_dpll {
* @info: platform specific info
*/
const struct dpll_info *info;
+ intel_wakeref_t wakeref;
};
#define SKL_DPLL0 0
@@ -331,15 +357,20 @@ void assert_shared_dpll(struct drm_i915_private *dev_priv,
bool state);
#define assert_shared_dpll_enabled(d, p) assert_shared_dpll(d, p, true)
#define assert_shared_dpll_disabled(d, p) assert_shared_dpll(d, p, false)
-struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc_state *state,
- struct intel_encoder *encoder);
-void intel_release_shared_dpll(struct intel_shared_dpll *dpll,
- struct intel_crtc *crtc,
- struct drm_atomic_state *state);
+bool intel_reserve_shared_dplls(struct intel_atomic_state *state,
+ struct intel_crtc *crtc,
+ struct intel_encoder *encoder);
+void intel_release_shared_dplls(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
+void icl_set_active_port_dpll(struct intel_crtc_state *crtc_state,
+ enum icl_port_dpll_id port_dpll_id);
+void intel_update_active_dpll(struct intel_atomic_state *state,
+ struct intel_crtc *crtc,
+ struct intel_encoder *encoder);
void intel_prepare_shared_dpll(const struct intel_crtc_state *crtc_state);
void intel_enable_shared_dpll(const struct intel_crtc_state *crtc_state);
void intel_disable_shared_dpll(const struct intel_crtc_state *crtc_state);
-void intel_shared_dpll_swap_state(struct drm_atomic_state *state);
+void intel_shared_dpll_swap_state(struct intel_atomic_state *state);
void intel_shared_dpll_init(struct drm_device *dev);
void intel_dpll_dump_hw_state(struct drm_i915_private *dev_priv,
diff --git a/drivers/gpu/drm/i915/display/intel_dsi.h b/drivers/gpu/drm/i915/display/intel_dsi.h
index 6d20434636cd..b15be5814599 100644
--- a/drivers/gpu/drm/i915/display/intel_dsi.h
+++ b/drivers/gpu/drm/i915/display/intel_dsi.h
@@ -26,7 +26,8 @@
#include <drm/drm_crtc.h>
#include <drm/drm_mipi_dsi.h>
-#include "intel_drv.h"
+
+#include "intel_display_types.h"
#define INTEL_DSI_VIDEO_MODE 0
#define INTEL_DSI_COMMAND_MODE 1
@@ -49,8 +50,11 @@ struct intel_dsi {
struct intel_connector *attached_connector;
- /* bit mask of ports being driven */
- u16 ports;
+ /* bit mask of ports (vlv dsi) or phys (icl dsi) being driven */
+ union {
+ u16 ports; /* VLV DSI */
+ u16 phys; /* ICL DSI */
+ };
/* if true, use HS mode, otherwise LP */
bool hs;
@@ -132,7 +136,10 @@ static inline struct intel_dsi_host *to_intel_dsi_host(struct mipi_dsi_host *h)
return container_of(h, struct intel_dsi_host, base);
}
-#define for_each_dsi_port(__port, __ports_mask) for_each_port_masked(__port, __ports_mask)
+#define for_each_dsi_port(__port, __ports_mask) \
+ for_each_port_masked(__port, __ports_mask)
+#define for_each_dsi_phy(__phy, __phys_mask) \
+ for_each_phy_masked(__phy, __phys_mask)
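[Editor's note: a minimal sketch of how the new ports/phys union is meant to be consumed, assuming gen11 DSI code that addresses combo PHYs rather than ports — illustrative only:

static void example_dsi_iterate(struct intel_dsi *intel_dsi)
{
	enum port port;
	enum phy phy;

	/* VLV/CHV DSI: iterate the driven ports. */
	for_each_dsi_port(port, intel_dsi->ports) {
		/* program per-port DSI registers ... */
	}

	/* ICL DSI: the same storage, interpreted as a PHY mask. */
	for_each_dsi_phy(phy, intel_dsi->phys) {
		/* program per-PHY (combo PHY) registers ... */
	}
}
]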
static inline struct intel_dsi *enc_to_intel_dsi(struct drm_encoder *encoder)
{
diff --git a/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c b/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c
index 8c33262cb0b2..bb3fd8b786a2 100644
--- a/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c
+++ b/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c
@@ -27,7 +27,7 @@
#include <video/mipi_display.h>
#include "i915_drv.h"
-#include "intel_drv.h"
+#include "intel_display_types.h"
#include "intel_dsi.h"
#include "intel_dsi_dcs_backlight.h"
diff --git a/drivers/gpu/drm/i915/display/intel_dsi_vbt.c b/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
index e5b178660408..f90946c912ee 100644
--- a/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
+++ b/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
@@ -38,7 +38,7 @@
#include <video/mipi_display.h>
#include "i915_drv.h"
-#include "intel_drv.h"
+#include "intel_display_types.h"
#include "intel_dsi.h"
#include "intel_sideband.h"
diff --git a/drivers/gpu/drm/i915/display/intel_dvo.c b/drivers/gpu/drm/i915/display/intel_dvo.c
index 22666d28f4aa..93baf366692e 100644
--- a/drivers/gpu/drm/i915/display/intel_dvo.c
+++ b/drivers/gpu/drm/i915/display/intel_dvo.c
@@ -34,7 +34,7 @@
#include "i915_drv.h"
#include "intel_connector.h"
-#include "intel_drv.h"
+#include "intel_display_types.h"
#include "intel_dvo.h"
#include "intel_dvo_dev.h"
#include "intel_gmbus.h"
diff --git a/drivers/gpu/drm/i915/display/intel_fbc.c b/drivers/gpu/drm/i915/display/intel_fbc.c
index d36cada2cc7d..16ed44bfd734 100644
--- a/drivers/gpu/drm/i915/display/intel_fbc.c
+++ b/drivers/gpu/drm/i915/display/intel_fbc.c
@@ -41,7 +41,7 @@
#include <drm/drm_fourcc.h>
#include "i915_drv.h"
-#include "intel_drv.h"
+#include "intel_display_types.h"
#include "intel_fbc.h"
#include "intel_frontbuffer.h"
@@ -110,9 +110,8 @@ static void i8xx_fbc_deactivate(struct drm_i915_private *dev_priv)
I915_WRITE(FBC_CONTROL, fbc_ctl);
/* Wait for compressing bit to clear */
- if (intel_wait_for_register(&dev_priv->uncore,
- FBC_STATUS, FBC_STAT_COMPRESSING, 0,
- 10)) {
+ if (intel_de_wait_for_clear(dev_priv, FBC_STATUS,
+ FBC_STAT_COMPRESSING, 10)) {
DRM_DEBUG_KMS("FBC idle timed out\n");
return;
}
diff --git a/drivers/gpu/drm/i915/display/intel_fbdev.c b/drivers/gpu/drm/i915/display/intel_fbdev.c
index 1edd44ee32b2..d59eee5c5d9c 100644
--- a/drivers/gpu/drm/i915/display/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/display/intel_fbdev.c
@@ -43,17 +43,18 @@
#include <drm/i915_drm.h>
#include "i915_drv.h"
-#include "intel_drv.h"
+#include "intel_display_types.h"
#include "intel_fbdev.h"
#include "intel_frontbuffer.h"
-static void intel_fbdev_invalidate(struct intel_fbdev *ifbdev)
+static struct intel_frontbuffer *to_frontbuffer(struct intel_fbdev *ifbdev)
{
- struct drm_i915_gem_object *obj = intel_fb_obj(&ifbdev->fb->base);
- unsigned int origin =
- ifbdev->vma_flags & PLANE_HAS_FENCE ? ORIGIN_GTT : ORIGIN_CPU;
+ return ifbdev->fb->frontbuffer;
+}
- intel_fb_obj_invalidate(obj, origin);
+static void intel_fbdev_invalidate(struct intel_fbdev *ifbdev)
+{
+ intel_frontbuffer_invalidate(to_frontbuffer(ifbdev), ORIGIN_CPU);
}
static int intel_fbdev_set_par(struct fb_info *info)
@@ -120,7 +121,7 @@ static int intelfb_alloc(struct drm_fb_helper *helper,
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_mode_fb_cmd2 mode_cmd = {};
struct drm_i915_gem_object *obj;
- int size, ret;
+ int size;
/* we don't do packed 24bpp */
if (sizes->surface_bpp == 24)
@@ -147,24 +148,16 @@ static int intelfb_alloc(struct drm_fb_helper *helper,
obj = i915_gem_object_create_shmem(dev_priv, size);
if (IS_ERR(obj)) {
DRM_ERROR("failed to allocate framebuffer\n");
- ret = PTR_ERR(obj);
- goto err;
+ return PTR_ERR(obj);
}
fb = intel_framebuffer_create(obj, &mode_cmd);
- if (IS_ERR(fb)) {
- ret = PTR_ERR(fb);
- goto err_obj;
- }
+ i915_gem_object_put(obj);
+ if (IS_ERR(fb))
+ return PTR_ERR(fb);
ifbdev->fb = to_intel_framebuffer(fb);
-
return 0;
-
-err_obj:
- i915_gem_object_put(obj);
-err:
- return ret;
}
static int intelfb_create(struct drm_fb_helper *helper,
@@ -180,7 +173,6 @@ static int intelfb_create(struct drm_fb_helper *helper,
const struct i915_ggtt_view view = {
.type = I915_GGTT_VIEW_NORMAL,
};
- struct drm_framebuffer *fb;
intel_wakeref_t wakeref;
struct fb_info *info;
struct i915_vma *vma;
@@ -226,8 +218,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
goto out_unlock;
}
- fb = &ifbdev->fb->base;
- intel_fb_obj_flush(intel_fb_obj(fb), ORIGIN_DIRTYFB);
+ intel_frontbuffer_flush(to_frontbuffer(ifbdev), ORIGIN_DIRTYFB);
info = drm_fb_helper_alloc_fbi(helper);
if (IS_ERR(info)) {
@@ -236,17 +227,14 @@ static int intelfb_create(struct drm_fb_helper *helper,
goto out_unpin;
}
- ifbdev->helper.fb = fb;
+ ifbdev->helper.fb = &ifbdev->fb->base;
info->fbops = &intelfb_ops;
/* setup aperture base/size for vesafb takeover */
- info->apertures->ranges[0].base = dev->mode_config.fb_base;
+ info->apertures->ranges[0].base = ggtt->gmadr.start;
info->apertures->ranges[0].size = ggtt->mappable_end;
- info->fix.smem_start = dev->mode_config.fb_base + i915_ggtt_offset(vma);
- info->fix.smem_len = vma->node.size;
-
vaddr = i915_vma_pin_iomap(vma);
if (IS_ERR(vaddr)) {
DRM_ERROR("Failed to remap framebuffer into virtual memory\n");
@@ -256,19 +244,24 @@ static int intelfb_create(struct drm_fb_helper *helper,
info->screen_base = vaddr;
info->screen_size = vma->node.size;
+ /* Our framebuffer is the entirety of fbdev's system memory */
+ info->fix.smem_start = (unsigned long)info->screen_base;
+ info->fix.smem_len = info->screen_size;
+
drm_fb_helper_fill_info(info, &ifbdev->helper, sizes);
/* If the object is shmemfs backed, it will have given us zeroed pages.
* If the object is stolen however, it will be full of whatever
* garbage was left in there.
*/
- if (intel_fb_obj(fb)->stolen && !prealloc)
+ if (vma->obj->stolen && !prealloc)
memset_io(info->screen_base, 0, info->screen_size);
/* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08x\n",
- fb->width, fb->height, i915_ggtt_offset(vma));
+ ifbdev->fb->base.width, ifbdev->fb->base.height,
+ i915_ggtt_offset(vma));
ifbdev->vma = vma;
ifbdev->vma_flags = flags;
diff --git a/drivers/gpu/drm/i915/display/intel_fifo_underrun.c b/drivers/gpu/drm/i915/display/intel_fifo_underrun.c
index 8545ad32bb50..ab61f88d1d33 100644
--- a/drivers/gpu/drm/i915/display/intel_fifo_underrun.c
+++ b/drivers/gpu/drm/i915/display/intel_fifo_underrun.c
@@ -26,7 +26,8 @@
*/
#include "i915_drv.h"
-#include "intel_drv.h"
+#include "i915_trace.h"
+#include "intel_display_types.h"
#include "intel_fbc.h"
#include "intel_fifo_underrun.h"
diff --git a/drivers/gpu/drm/i915/display/intel_frontbuffer.c b/drivers/gpu/drm/i915/display/intel_frontbuffer.c
index 44273c10cea5..719379774fa5 100644
--- a/drivers/gpu/drm/i915/display/intel_frontbuffer.c
+++ b/drivers/gpu/drm/i915/display/intel_frontbuffer.c
@@ -30,11 +30,11 @@
* Many features require us to track changes to the currently active
* frontbuffer, especially rendering targeted at the frontbuffer.
*
- * To be able to do so GEM tracks frontbuffers using a bitmask for all possible
- * frontbuffer slots through i915_gem_track_fb(). The function in this file are
- * then called when the contents of the frontbuffer are invalidated, when
- * frontbuffer rendering has stopped again to flush out all the changes and when
- * the frontbuffer is exchanged with a flip. Subsystems interested in
+ * To be able to do so we track frontbuffers using a bitmask for all possible
+ * frontbuffer slots through intel_frontbuffer_track(). The functions in this
+ * file are then called when the contents of the frontbuffer are invalidated,
+ * when frontbuffer rendering has stopped again to flush out all the changes
+ * and when the frontbuffer is exchanged with a flip. Subsystems interested in
* frontbuffer changes (e.g. PSR, FBC, DRRS) should directly put their callbacks
* into the relevant places and filter for the frontbuffer slots that they are
 * interested in.
@@ -58,33 +58,14 @@
#include "display/intel_dp.h"
#include "i915_drv.h"
-#include "intel_drv.h"
+#include "intel_display_types.h"
#include "intel_fbc.h"
#include "intel_frontbuffer.h"
#include "intel_psr.h"
-void __intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
- enum fb_op_origin origin,
- unsigned int frontbuffer_bits)
-{
- struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
-
- if (origin == ORIGIN_CS) {
- spin_lock(&dev_priv->fb_tracking.lock);
- dev_priv->fb_tracking.busy_bits |= frontbuffer_bits;
- dev_priv->fb_tracking.flip_bits &= ~frontbuffer_bits;
- spin_unlock(&dev_priv->fb_tracking.lock);
- }
-
- might_sleep();
- intel_psr_invalidate(dev_priv, frontbuffer_bits, origin);
- intel_edp_drrs_invalidate(dev_priv, frontbuffer_bits);
- intel_fbc_invalidate(dev_priv, frontbuffer_bits, origin);
-}
-
/**
- * intel_frontbuffer_flush - flush frontbuffer
- * @dev_priv: i915 device
+ * frontbuffer_flush - flush frontbuffer
+ * @i915: i915 device
* @frontbuffer_bits: frontbuffer plane tracking bits
* @origin: which operation caused the flush
*
@@ -94,45 +75,27 @@ void __intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
*
* Can be called without any locks held.
*/
-static void intel_frontbuffer_flush(struct drm_i915_private *dev_priv,
- unsigned frontbuffer_bits,
- enum fb_op_origin origin)
+static void frontbuffer_flush(struct drm_i915_private *i915,
+ unsigned int frontbuffer_bits,
+ enum fb_op_origin origin)
{
/* Delay flushing when rings are still busy. */
- spin_lock(&dev_priv->fb_tracking.lock);
- frontbuffer_bits &= ~dev_priv->fb_tracking.busy_bits;
- spin_unlock(&dev_priv->fb_tracking.lock);
+ spin_lock(&i915->fb_tracking.lock);
+ frontbuffer_bits &= ~i915->fb_tracking.busy_bits;
+ spin_unlock(&i915->fb_tracking.lock);
if (!frontbuffer_bits)
return;
might_sleep();
- intel_edp_drrs_flush(dev_priv, frontbuffer_bits);
- intel_psr_flush(dev_priv, frontbuffer_bits, origin);
- intel_fbc_flush(dev_priv, frontbuffer_bits, origin);
-}
-
-void __intel_fb_obj_flush(struct drm_i915_gem_object *obj,
- enum fb_op_origin origin,
- unsigned int frontbuffer_bits)
-{
- struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
-
- if (origin == ORIGIN_CS) {
- spin_lock(&dev_priv->fb_tracking.lock);
- /* Filter out new bits since rendering started. */
- frontbuffer_bits &= dev_priv->fb_tracking.busy_bits;
- dev_priv->fb_tracking.busy_bits &= ~frontbuffer_bits;
- spin_unlock(&dev_priv->fb_tracking.lock);
- }
-
- if (frontbuffer_bits)
- intel_frontbuffer_flush(dev_priv, frontbuffer_bits, origin);
+ intel_edp_drrs_flush(i915, frontbuffer_bits);
+ intel_psr_flush(i915, frontbuffer_bits, origin);
+ intel_fbc_flush(i915, frontbuffer_bits, origin);
}
/**
* intel_frontbuffer_flip_prepare - prepare asynchronous frontbuffer flip
- * @dev_priv: i915 device
+ * @i915: i915 device
* @frontbuffer_bits: frontbuffer plane tracking bits
*
* This function gets called after scheduling a flip on @obj. The actual
@@ -142,19 +105,19 @@ void __intel_fb_obj_flush(struct drm_i915_gem_object *obj,
*
* Can be called without any locks held.
*/
-void intel_frontbuffer_flip_prepare(struct drm_i915_private *dev_priv,
+void intel_frontbuffer_flip_prepare(struct drm_i915_private *i915,
unsigned frontbuffer_bits)
{
- spin_lock(&dev_priv->fb_tracking.lock);
- dev_priv->fb_tracking.flip_bits |= frontbuffer_bits;
+ spin_lock(&i915->fb_tracking.lock);
+ i915->fb_tracking.flip_bits |= frontbuffer_bits;
/* Remove stale busy bits due to the old buffer. */
- dev_priv->fb_tracking.busy_bits &= ~frontbuffer_bits;
- spin_unlock(&dev_priv->fb_tracking.lock);
+ i915->fb_tracking.busy_bits &= ~frontbuffer_bits;
+ spin_unlock(&i915->fb_tracking.lock);
}
/**
* intel_frontbuffer_flip_complete - complete asynchronous frontbuffer flip
- * @dev_priv: i915 device
+ * @i915: i915 device
* @frontbuffer_bits: frontbuffer plane tracking bits
*
* This function gets called after the flip has been latched and will complete
@@ -162,23 +125,22 @@ void intel_frontbuffer_flip_prepare(struct drm_i915_private *dev_priv,
*
* Can be called without any locks held.
*/
-void intel_frontbuffer_flip_complete(struct drm_i915_private *dev_priv,
+void intel_frontbuffer_flip_complete(struct drm_i915_private *i915,
unsigned frontbuffer_bits)
{
- spin_lock(&dev_priv->fb_tracking.lock);
+ spin_lock(&i915->fb_tracking.lock);
/* Mask any cancelled flips. */
- frontbuffer_bits &= dev_priv->fb_tracking.flip_bits;
- dev_priv->fb_tracking.flip_bits &= ~frontbuffer_bits;
- spin_unlock(&dev_priv->fb_tracking.lock);
+ frontbuffer_bits &= i915->fb_tracking.flip_bits;
+ i915->fb_tracking.flip_bits &= ~frontbuffer_bits;
+ spin_unlock(&i915->fb_tracking.lock);
if (frontbuffer_bits)
- intel_frontbuffer_flush(dev_priv,
- frontbuffer_bits, ORIGIN_FLIP);
+ frontbuffer_flush(i915, frontbuffer_bits, ORIGIN_FLIP);
}
/**
* intel_frontbuffer_flip - synchronous frontbuffer flip
- * @dev_priv: i915 device
+ * @i915: i915 device
* @frontbuffer_bits: frontbuffer plane tracking bits
*
* This function gets called after scheduling a flip on @obj. This is for
@@ -187,13 +149,160 @@ void intel_frontbuffer_flip_complete(struct drm_i915_private *dev_priv,
*
* Can be called without any locks held.
*/
-void intel_frontbuffer_flip(struct drm_i915_private *dev_priv,
+void intel_frontbuffer_flip(struct drm_i915_private *i915,
unsigned frontbuffer_bits)
{
- spin_lock(&dev_priv->fb_tracking.lock);
+ spin_lock(&i915->fb_tracking.lock);
/* Remove stale busy bits due to the old buffer. */
- dev_priv->fb_tracking.busy_bits &= ~frontbuffer_bits;
- spin_unlock(&dev_priv->fb_tracking.lock);
+ i915->fb_tracking.busy_bits &= ~frontbuffer_bits;
+ spin_unlock(&i915->fb_tracking.lock);
- intel_frontbuffer_flush(dev_priv, frontbuffer_bits, ORIGIN_FLIP);
+ frontbuffer_flush(i915, frontbuffer_bits, ORIGIN_FLIP);
+}
+
+void __intel_fb_invalidate(struct intel_frontbuffer *front,
+ enum fb_op_origin origin,
+ unsigned int frontbuffer_bits)
+{
+ struct drm_i915_private *i915 = to_i915(front->obj->base.dev);
+
+ if (origin == ORIGIN_CS) {
+ spin_lock(&i915->fb_tracking.lock);
+ i915->fb_tracking.busy_bits |= frontbuffer_bits;
+ i915->fb_tracking.flip_bits &= ~frontbuffer_bits;
+ spin_unlock(&i915->fb_tracking.lock);
+ }
+
+ might_sleep();
+ intel_psr_invalidate(i915, frontbuffer_bits, origin);
+ intel_edp_drrs_invalidate(i915, frontbuffer_bits);
+ intel_fbc_invalidate(i915, frontbuffer_bits, origin);
+}
+
+void __intel_fb_flush(struct intel_frontbuffer *front,
+ enum fb_op_origin origin,
+ unsigned int frontbuffer_bits)
+{
+ struct drm_i915_private *i915 = to_i915(front->obj->base.dev);
+
+ if (origin == ORIGIN_CS) {
+ spin_lock(&i915->fb_tracking.lock);
+ /* Filter out new bits since rendering started. */
+ frontbuffer_bits &= i915->fb_tracking.busy_bits;
+ i915->fb_tracking.busy_bits &= ~frontbuffer_bits;
+ spin_unlock(&i915->fb_tracking.lock);
+ }
+
+ if (frontbuffer_bits)
+ frontbuffer_flush(i915, frontbuffer_bits, origin);
+}
+
+static int frontbuffer_active(struct i915_active *ref)
+{
+ struct intel_frontbuffer *front =
+ container_of(ref, typeof(*front), write);
+
+ kref_get(&front->ref);
+ return 0;
+}
+
+static void frontbuffer_retire(struct i915_active *ref)
+{
+ struct intel_frontbuffer *front =
+ container_of(ref, typeof(*front), write);
+
+ intel_frontbuffer_flush(front, ORIGIN_CS);
+ intel_frontbuffer_put(front);
+}
+
+static void frontbuffer_release(struct kref *ref)
+ __releases(&to_i915(front->obj->base.dev)->fb_tracking.lock)
+{
+ struct intel_frontbuffer *front =
+ container_of(ref, typeof(*front), ref);
+
+ front->obj->frontbuffer = NULL;
+ spin_unlock(&to_i915(front->obj->base.dev)->fb_tracking.lock);
+
+ i915_gem_object_put(front->obj);
+ kfree(front);
+}
+
+struct intel_frontbuffer *
+intel_frontbuffer_get(struct drm_i915_gem_object *obj)
+{
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
+ struct intel_frontbuffer *front;
+
+ spin_lock(&i915->fb_tracking.lock);
+ front = obj->frontbuffer;
+ if (front)
+ kref_get(&front->ref);
+ spin_unlock(&i915->fb_tracking.lock);
+ if (front)
+ return front;
+
+ front = kmalloc(sizeof(*front), GFP_KERNEL);
+ if (!front)
+ return NULL;
+
+ front->obj = obj;
+ kref_init(&front->ref);
+ atomic_set(&front->bits, 0);
+ i915_active_init(i915, &front->write,
+ frontbuffer_active, frontbuffer_retire);
+
+ spin_lock(&i915->fb_tracking.lock);
+ if (obj->frontbuffer) {
+ kfree(front);
+ front = obj->frontbuffer;
+ kref_get(&front->ref);
+ } else {
+ i915_gem_object_get(obj);
+ obj->frontbuffer = front;
+ }
+ spin_unlock(&i915->fb_tracking.lock);
+
+ return front;
+}
+
+void intel_frontbuffer_put(struct intel_frontbuffer *front)
+{
+ kref_put_lock(&front->ref,
+ frontbuffer_release,
+ &to_i915(front->obj->base.dev)->fb_tracking.lock);
+}
+
+/**
+ * intel_frontbuffer_track - update frontbuffer tracking
+ * @old: current buffer for the frontbuffer slots
+ * @new: new buffer for the frontbuffer slots
+ * @frontbuffer_bits: bitmask of frontbuffer slots
+ *
+ * This updates the frontbuffer tracking bits @frontbuffer_bits by clearing them
+ * from @old and setting them in @new. Both @old and @new can be NULL.
+ */
+void intel_frontbuffer_track(struct intel_frontbuffer *old,
+ struct intel_frontbuffer *new,
+ unsigned int frontbuffer_bits)
+{
+ /*
+ * Control of individual bits within the mask is guarded by
+ * the owning plane->mutex, i.e. we can never see concurrent
+ * manipulation of individual bits. But since the bitfield as a whole
+ * is updated using RMW, we need to use atomics in order to update
+ * the bits.
+ */
+ BUILD_BUG_ON(INTEL_FRONTBUFFER_BITS_PER_PIPE * I915_MAX_PIPES >
+ BITS_PER_TYPE(atomic_t));
+
+ if (old) {
+ WARN_ON(!(atomic_read(&old->bits) & frontbuffer_bits));
+ atomic_andnot(frontbuffer_bits, &old->bits);
+ }
+
+ if (new) {
+ WARN_ON(atomic_read(&new->bits) & frontbuffer_bits);
+ atomic_or(frontbuffer_bits, &new->bits);
+ }
}
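[Editor's note: a sketch of the reference-counted lifecycle established above, assuming a caller that pins a GEM object as a scanout buffer — illustrative; the real call sites live in the framebuffer and plane code:

static int example_track_scanout(struct drm_i915_gem_object *obj,
				 unsigned int frontbuffer_bits)
{
	struct intel_frontbuffer *front;

	front = intel_frontbuffer_get(obj);
	if (!front)
		return -ENOMEM;

	/* Start tracking: set our bits on the new buffer. */
	intel_frontbuffer_track(NULL, front, frontbuffer_bits);

	/* ... scanout happens ... */

	/* Stop tracking and drop the reference. */
	intel_frontbuffer_track(front, NULL, frontbuffer_bits);
	intel_frontbuffer_put(front);
	return 0;
}
]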
diff --git a/drivers/gpu/drm/i915/display/intel_frontbuffer.h b/drivers/gpu/drm/i915/display/intel_frontbuffer.h
index 5727320c8084..adc64d61a4a5 100644
--- a/drivers/gpu/drm/i915/display/intel_frontbuffer.h
+++ b/drivers/gpu/drm/i915/display/intel_frontbuffer.h
@@ -24,7 +24,10 @@
#ifndef __INTEL_FRONTBUFFER_H__
#define __INTEL_FRONTBUFFER_H__
-#include "gem/i915_gem_object.h"
+#include <linux/atomic.h>
+#include <linux/kref.h>
+
+#include "i915_active.h"
struct drm_i915_private;
struct drm_i915_gem_object;
@@ -37,23 +40,30 @@ enum fb_op_origin {
ORIGIN_DIRTYFB,
};
-void intel_frontbuffer_flip_prepare(struct drm_i915_private *dev_priv,
+struct intel_frontbuffer {
+ struct kref ref;
+ atomic_t bits;
+ struct i915_active write;
+ struct drm_i915_gem_object *obj;
+};
+
+void intel_frontbuffer_flip_prepare(struct drm_i915_private *i915,
unsigned frontbuffer_bits);
-void intel_frontbuffer_flip_complete(struct drm_i915_private *dev_priv,
+void intel_frontbuffer_flip_complete(struct drm_i915_private *i915,
unsigned frontbuffer_bits);
-void intel_frontbuffer_flip(struct drm_i915_private *dev_priv,
+void intel_frontbuffer_flip(struct drm_i915_private *i915,
unsigned frontbuffer_bits);
-void __intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
- enum fb_op_origin origin,
- unsigned int frontbuffer_bits);
-void __intel_fb_obj_flush(struct drm_i915_gem_object *obj,
- enum fb_op_origin origin,
- unsigned int frontbuffer_bits);
+struct intel_frontbuffer *
+intel_frontbuffer_get(struct drm_i915_gem_object *obj);
+
+void __intel_fb_invalidate(struct intel_frontbuffer *front,
+ enum fb_op_origin origin,
+ unsigned int frontbuffer_bits);
/**
- * intel_fb_obj_invalidate - invalidate frontbuffer object
- * @obj: GEM object to invalidate
+ * intel_frontbuffer_invalidate - invalidate frontbuffer object
+ * @front: frontbuffer object to invalidate
* @origin: which operation caused the invalidation
*
* This function gets called every time rendering on the given object starts and
@@ -62,37 +72,53 @@ void __intel_fb_obj_flush(struct drm_i915_gem_object *obj,
* until the rendering completes or a flip on this frontbuffer plane is
* scheduled.
*/
-static inline bool intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
- enum fb_op_origin origin)
+static inline bool intel_frontbuffer_invalidate(struct intel_frontbuffer *front,
+ enum fb_op_origin origin)
{
unsigned int frontbuffer_bits;
- frontbuffer_bits = atomic_read(&obj->frontbuffer_bits);
+ if (!front)
+ return false;
+
+ frontbuffer_bits = atomic_read(&front->bits);
if (!frontbuffer_bits)
return false;
- __intel_fb_obj_invalidate(obj, origin, frontbuffer_bits);
+ __intel_fb_invalidate(front, origin, frontbuffer_bits);
return true;
}
+void __intel_fb_flush(struct intel_frontbuffer *front,
+ enum fb_op_origin origin,
+ unsigned int frontbuffer_bits);
+
/**
- * intel_fb_obj_flush - flush frontbuffer object
- * @obj: GEM object to flush
+ * intel_frontbuffer_flush - flush frontbuffer object
+ * @front: frontbuffer object to flush
* @origin: which operation caused the flush
*
* This function gets called every time rendering on the given object has
* completed and frontbuffer caching can be started again.
*/
-static inline void intel_fb_obj_flush(struct drm_i915_gem_object *obj,
- enum fb_op_origin origin)
+static inline void intel_frontbuffer_flush(struct intel_frontbuffer *front,
+ enum fb_op_origin origin)
{
unsigned int frontbuffer_bits;
- frontbuffer_bits = atomic_read(&obj->frontbuffer_bits);
+ if (!front)
+ return;
+
+ frontbuffer_bits = atomic_read(&front->bits);
if (!frontbuffer_bits)
return;
- __intel_fb_obj_flush(obj, origin, frontbuffer_bits);
+ __intel_fb_flush(front, origin, frontbuffer_bits);
}
+void intel_frontbuffer_track(struct intel_frontbuffer *old,
+ struct intel_frontbuffer *new,
+ unsigned int frontbuffer_bits);
+
+void intel_frontbuffer_put(struct intel_frontbuffer *front);
+
#endif /* __INTEL_FRONTBUFFER_H__ */
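[Editor's note: a sketch of the expected invalidate/flush pairing around CPU rendering, using only the helpers declared above — illustrative:

static void example_cpu_draw(struct intel_frontbuffer *front)
{
	/* Tell PSR/FBC/DRRS that the frontbuffer is about to change. */
	intel_frontbuffer_invalidate(front, ORIGIN_CPU);

	/* ... CPU writes to the mapped framebuffer ... */

	/* Rendering done: frontbuffer caching may resume. */
	intel_frontbuffer_flush(front, ORIGIN_CPU);
}
]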
diff --git a/drivers/gpu/drm/i915/display/intel_gmbus.c b/drivers/gpu/drm/i915/display/intel_gmbus.c
index 4f6a9bd5af47..d6775a005726 100644
--- a/drivers/gpu/drm/i915/display/intel_gmbus.c
+++ b/drivers/gpu/drm/i915/display/intel_gmbus.c
@@ -35,7 +35,7 @@
#include <drm/i915_drm.h>
#include "i915_drv.h"
-#include "intel_drv.h"
+#include "intel_display_types.h"
#include "intel_gmbus.h"
struct gmbus_pin {
@@ -82,25 +82,20 @@ static const struct gmbus_pin gmbus_pins_cnp[] = {
static const struct gmbus_pin gmbus_pins_icp[] = {
[GMBUS_PIN_1_BXT] = { "dpa", GPIOB },
[GMBUS_PIN_2_BXT] = { "dpb", GPIOC },
+ [GMBUS_PIN_3_BXT] = { "dpc", GPIOD },
[GMBUS_PIN_9_TC1_ICP] = { "tc1", GPIOJ },
[GMBUS_PIN_10_TC2_ICP] = { "tc2", GPIOK },
[GMBUS_PIN_11_TC3_ICP] = { "tc3", GPIOL },
[GMBUS_PIN_12_TC4_ICP] = { "tc4", GPIOM },
-};
-
-static const struct gmbus_pin gmbus_pins_mcc[] = {
- [GMBUS_PIN_1_BXT] = { "dpa", GPIOB },
- [GMBUS_PIN_2_BXT] = { "dpb", GPIOC },
- [GMBUS_PIN_9_TC1_ICP] = { "dpc", GPIOJ },
+ [GMBUS_PIN_13_TC5_TGP] = { "tc5", GPION },
+ [GMBUS_PIN_14_TC6_TGP] = { "tc6", GPIOO },
};
/* pin is expected to be valid */
static const struct gmbus_pin *get_gmbus_pin(struct drm_i915_private *dev_priv,
unsigned int pin)
{
- if (HAS_PCH_MCC(dev_priv))
- return &gmbus_pins_mcc[pin];
- else if (HAS_PCH_ICP(dev_priv))
+ if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
return &gmbus_pins_icp[pin];
else if (HAS_PCH_CNP(dev_priv))
return &gmbus_pins_cnp[pin];
@@ -119,9 +114,7 @@ bool intel_gmbus_is_valid_pin(struct drm_i915_private *dev_priv,
{
unsigned int size;
- if (HAS_PCH_MCC(dev_priv))
- size = ARRAY_SIZE(gmbus_pins_mcc);
- else if (HAS_PCH_ICP(dev_priv))
+ if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
size = ARRAY_SIZE(gmbus_pins_icp);
else if (HAS_PCH_CNP(dev_priv))
size = ARRAY_SIZE(gmbus_pins_cnp);
diff --git a/drivers/gpu/drm/i915/display/intel_gmbus.h b/drivers/gpu/drm/i915/display/intel_gmbus.h
index d989085b8d22..b96212b85425 100644
--- a/drivers/gpu/drm/i915/display/intel_gmbus.h
+++ b/drivers/gpu/drm/i915/display/intel_gmbus.h
@@ -11,6 +11,28 @@
struct drm_i915_private;
struct i2c_adapter;
+#define GMBUS_PIN_DISABLED 0
+#define GMBUS_PIN_SSC 1
+#define GMBUS_PIN_VGADDC 2
+#define GMBUS_PIN_PANEL 3
+#define GMBUS_PIN_DPD_CHV 3 /* HDMID_CHV */
+#define GMBUS_PIN_DPC 4 /* HDMIC */
+#define GMBUS_PIN_DPB 5 /* SDVO, HDMIB */
+#define GMBUS_PIN_DPD 6 /* HDMID */
+#define GMBUS_PIN_RESERVED 7 /* 7 reserved */
+#define GMBUS_PIN_1_BXT 1 /* BXT+ (atom) and CNP+ (big core) */
+#define GMBUS_PIN_2_BXT 2
+#define GMBUS_PIN_3_BXT 3
+#define GMBUS_PIN_4_CNP 4
+#define GMBUS_PIN_9_TC1_ICP 9
+#define GMBUS_PIN_10_TC2_ICP 10
+#define GMBUS_PIN_11_TC3_ICP 11
+#define GMBUS_PIN_12_TC4_ICP 12
+#define GMBUS_PIN_13_TC5_TGP 13
+#define GMBUS_PIN_14_TC6_TGP 14
+
+#define GMBUS_NUM_PINS 15 /* including 0 */
+
int intel_gmbus_setup(struct drm_i915_private *dev_priv);
void intel_gmbus_teardown(struct drm_i915_private *dev_priv);
bool intel_gmbus_is_valid_pin(struct drm_i915_private *dev_priv,
diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.c b/drivers/gpu/drm/i915/display/intel_hdcp.c
index 27bd7276a82d..6ec5ceeab601 100644
--- a/drivers/gpu/drm/i915/display/intel_hdcp.c
+++ b/drivers/gpu/drm/i915/display/intel_hdcp.c
@@ -14,7 +14,8 @@
#include <drm/i915_component.h>
#include "i915_reg.h"
-#include "intel_drv.h"
+#include "intel_display_power.h"
+#include "intel_display_types.h"
#include "intel_hdcp.h"
#include "intel_sideband.h"
@@ -244,8 +245,7 @@ static int intel_hdcp_load_keys(struct drm_i915_private *dev_priv)
static int intel_write_sha_text(struct drm_i915_private *dev_priv, u32 sha_text)
{
I915_WRITE(HDCP_SHA_TEXT, sha_text);
- if (intel_wait_for_register(&dev_priv->uncore, HDCP_REP_CTL,
- HDCP_SHA1_READY, HDCP_SHA1_READY, 1)) {
+ if (intel_de_wait_for_set(dev_priv, HDCP_REP_CTL, HDCP_SHA1_READY, 1)) {
DRM_ERROR("Timed out waiting for SHA1 ready\n");
return -ETIMEDOUT;
}
@@ -475,9 +475,8 @@ int intel_hdcp_validate_v_prime(struct intel_digital_port *intel_dig_port,
/* Tell the HW we're done with the hash and wait for it to ACK */
I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_COMPLETE_HASH);
- if (intel_wait_for_register(&dev_priv->uncore, HDCP_REP_CTL,
- HDCP_SHA1_COMPLETE,
- HDCP_SHA1_COMPLETE, 1)) {
+ if (intel_de_wait_for_set(dev_priv, HDCP_REP_CTL,
+ HDCP_SHA1_COMPLETE, 1)) {
DRM_ERROR("Timed out waiting for SHA1 complete\n");
return -ETIMEDOUT;
}
@@ -523,12 +522,16 @@ int intel_hdcp_auth_downstream(struct intel_connector *connector)
* authentication.
*/
num_downstream = DRM_HDCP_NUM_DOWNSTREAM(bstatus[0]);
- if (num_downstream == 0)
+ if (num_downstream == 0) {
+ DRM_DEBUG_KMS("Repeater with zero downstream devices\n");
return -EINVAL;
+ }
ksv_fifo = kcalloc(DRM_HDCP_KSV_LEN, num_downstream, GFP_KERNEL);
- if (!ksv_fifo)
+ if (!ksv_fifo) {
+ DRM_DEBUG_KMS("Out of mem: ksv_fifo\n");
return -ENOMEM;
+ }
ret = shim->read_ksv_fifo(intel_dig_port, num_downstream, ksv_fifo);
if (ret)
@@ -616,9 +619,8 @@ static int intel_hdcp_auth(struct intel_connector *connector)
I915_WRITE(PORT_HDCP_CONF(port), HDCP_CONF_CAPTURE_AN);
/* Wait for An to be acquired */
- if (intel_wait_for_register(&dev_priv->uncore, PORT_HDCP_STATUS(port),
- HDCP_STATUS_AN_READY,
- HDCP_STATUS_AN_READY, 1)) {
+ if (intel_de_wait_for_set(dev_priv, PORT_HDCP_STATUS(port),
+ HDCP_STATUS_AN_READY, 1)) {
DRM_ERROR("Timed out waiting for An\n");
return -ETIMEDOUT;
}
@@ -702,9 +704,9 @@ static int intel_hdcp_auth(struct intel_connector *connector)
}
/* Wait for encryption confirmation */
- if (intel_wait_for_register(&dev_priv->uncore, PORT_HDCP_STATUS(port),
- HDCP_STATUS_ENC, HDCP_STATUS_ENC,
- ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
+ if (intel_de_wait_for_set(dev_priv, PORT_HDCP_STATUS(port),
+ HDCP_STATUS_ENC,
+ ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
DRM_ERROR("Timed out waiting for encryption\n");
return -ETIMEDOUT;
}
@@ -734,8 +736,7 @@ static int _intel_hdcp_disable(struct intel_connector *connector)
hdcp->hdcp_encrypted = false;
I915_WRITE(PORT_HDCP_CONF(port), 0);
- if (intel_wait_for_register(&dev_priv->uncore,
- PORT_HDCP_STATUS(port), ~0, 0,
+ if (intel_de_wait_for_clear(dev_priv, PORT_HDCP_STATUS(port), ~0,
ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
DRM_ERROR("Failed to disable HDCP, timeout clearing status\n");
return -ETIMEDOUT;
@@ -866,7 +867,6 @@ static void intel_hdcp_prop_work(struct work_struct *work)
prop_work);
struct intel_connector *connector = intel_hdcp_to_connector(hdcp);
struct drm_device *dev = connector->base.dev;
- struct drm_connector_state *state;
drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
mutex_lock(&hdcp->mutex);
@@ -876,10 +876,9 @@ static void intel_hdcp_prop_work(struct work_struct *work)
* those to UNDESIRED is handled by core. If value == UNDESIRED,
* we're running just after hdcp has been disabled, so just exit
*/
- if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
- state = connector->base.state;
- state->content_protection = hdcp->value;
- }
+ if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
+ drm_hdcp_update_content_protection(&connector->base,
+ hdcp->value);
mutex_unlock(&hdcp->mutex);
drm_modeset_unlock(&dev->mode_config.connection_mutex);
@@ -1207,8 +1206,10 @@ static int hdcp2_authentication_key_exchange(struct intel_connector *connector)
if (ret < 0)
return ret;
- if (msgs.send_cert.rx_caps[0] != HDCP_2_2_RX_CAPS_VERSION_VAL)
+ if (msgs.send_cert.rx_caps[0] != HDCP_2_2_RX_CAPS_VERSION_VAL) {
+ DRM_DEBUG_KMS("cert.rx_caps dont claim HDCP2.2\n");
return -EINVAL;
+ }
hdcp->is_repeater = HDCP_2_2_RX_REPEATER(msgs.send_cert.rx_caps[2]);
@@ -1512,10 +1513,9 @@ static int hdcp2_enable_encryption(struct intel_connector *connector)
CTL_LINK_ENCRYPTION_REQ);
}
- ret = intel_wait_for_register(&dev_priv->uncore, HDCP2_STATUS_DDI(port),
- LINK_ENCRYPTION_STATUS,
- LINK_ENCRYPTION_STATUS,
- ENCRYPT_STATUS_CHANGE_TIMEOUT_MS);
+ ret = intel_de_wait_for_set(dev_priv, HDCP2_STATUS_DDI(port),
+ LINK_ENCRYPTION_STATUS,
+ ENCRYPT_STATUS_CHANGE_TIMEOUT_MS);
return ret;
}
@@ -1533,8 +1533,8 @@ static int hdcp2_disable_encryption(struct intel_connector *connector)
I915_WRITE(HDCP2_CTL_DDI(port),
I915_READ(HDCP2_CTL_DDI(port)) & ~CTL_LINK_ENCRYPTION_REQ);
- ret = intel_wait_for_register(&dev_priv->uncore, HDCP2_STATUS_DDI(port),
- LINK_ENCRYPTION_STATUS, 0x0,
+ ret = intel_de_wait_for_clear(dev_priv, HDCP2_STATUS_DDI(port),
+ LINK_ENCRYPTION_STATUS,
ENCRYPT_STATUS_CHANGE_TIMEOUT_MS);
if (ret == -ETIMEDOUT)
DRM_DEBUG_KMS("Disable Encryption Timedout");
@@ -1749,14 +1749,15 @@ static const struct component_ops i915_hdcp_component_ops = {
.unbind = i915_hdcp_component_unbind,
};
-static inline int initialize_hdcp_port_data(struct intel_connector *connector)
+static inline int initialize_hdcp_port_data(struct intel_connector *connector,
+ const struct intel_hdcp_shim *shim)
{
struct intel_hdcp *hdcp = &connector->hdcp;
struct hdcp_port_data *data = &hdcp->port_data;
data->port = connector->encoder->port;
data->port_type = (u8)HDCP_PORT_TYPE_INTEGRATED;
- data->protocol = (u8)hdcp->shim->protocol;
+ data->protocol = (u8)shim->protocol;
data->k = 1;
if (!data->streams)
@@ -1806,12 +1807,13 @@ void intel_hdcp_component_init(struct drm_i915_private *dev_priv)
}
}
-static void intel_hdcp2_init(struct intel_connector *connector)
+static void intel_hdcp2_init(struct intel_connector *connector,
+ const struct intel_hdcp_shim *shim)
{
struct intel_hdcp *hdcp = &connector->hdcp;
int ret;
- ret = initialize_hdcp_port_data(connector);
+ ret = initialize_hdcp_port_data(connector, shim);
if (ret) {
DRM_DEBUG_KMS("Mei hdcp data init failed\n");
return;
@@ -1830,23 +1832,28 @@ int intel_hdcp_init(struct intel_connector *connector,
if (!shim)
return -EINVAL;
- ret = drm_connector_attach_content_protection_property(&connector->base);
- if (ret)
+ if (is_hdcp2_supported(dev_priv))
+ intel_hdcp2_init(connector, shim);
+
+ ret =
+ drm_connector_attach_content_protection_property(&connector->base,
+ hdcp->hdcp2_supported);
+ if (ret) {
+ hdcp->hdcp2_supported = false;
+ kfree(hdcp->port_data.streams);
return ret;
+ }
hdcp->shim = shim;
mutex_init(&hdcp->mutex);
INIT_DELAYED_WORK(&hdcp->check_work, intel_hdcp_check_work);
INIT_WORK(&hdcp->prop_work, intel_hdcp_prop_work);
-
- if (is_hdcp2_supported(dev_priv))
- intel_hdcp2_init(connector);
init_waitqueue_head(&hdcp->cp_irq_queue);
return 0;
}
-int intel_hdcp_enable(struct intel_connector *connector)
+int intel_hdcp_enable(struct intel_connector *connector, u8 content_type)
{
struct intel_hdcp *hdcp = &connector->hdcp;
unsigned long check_link_interval = DRM_HDCP_CHECK_PERIOD_MS;
@@ -1857,6 +1864,7 @@ int intel_hdcp_enable(struct intel_connector *connector)
mutex_lock(&hdcp->mutex);
WARN_ON(hdcp->value == DRM_MODE_CONTENT_PROTECTION_ENABLED);
+ hdcp->content_type = content_type;
/*
* Considering that HDCP2.2 is more secure than HDCP1.4, If the setup
@@ -1868,8 +1876,12 @@ int intel_hdcp_enable(struct intel_connector *connector)
check_link_interval = DRM_HDCP2_CHECK_PERIOD_MS;
}
- /* When HDCP2.2 fails, HDCP1.4 will be attempted */
- if (ret && intel_hdcp_capable(connector)) {
+ /*
+ * When HDCP2.2 fails and Content Type is not Type1, HDCP1.4 will
+ * be attempted.
+ */
+ if (ret && intel_hdcp_capable(connector) &&
+ hdcp->content_type != DRM_MODE_HDCP_CONTENT_TYPE1) {
ret = _intel_hdcp_enable(connector);
}
@@ -1951,12 +1963,15 @@ void intel_hdcp_atomic_check(struct drm_connector *connector,
/*
* Nothing to do if the state didn't change, or HDCP was activated since
- * the last commit
+ * the last commit, and the HDCP content type also didn't change.
*/
if (old_cp == new_cp ||
(old_cp == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
- new_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED))
- return;
+ new_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED)) {
+ if (old_state->hdcp_content_type ==
+ new_state->hdcp_content_type)
+ return;
+ }
crtc_state = drm_atomic_get_new_crtc_state(new_state->state,
new_state->crtc);
diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.h b/drivers/gpu/drm/i915/display/intel_hdcp.h
index be8da85c866a..13555b054930 100644
--- a/drivers/gpu/drm/i915/display/intel_hdcp.h
+++ b/drivers/gpu/drm/i915/display/intel_hdcp.h
@@ -21,7 +21,7 @@ void intel_hdcp_atomic_check(struct drm_connector *connector,
struct drm_connector_state *new_state);
int intel_hdcp_init(struct intel_connector *connector,
const struct intel_hdcp_shim *hdcp_shim);
-int intel_hdcp_enable(struct intel_connector *connector);
+int intel_hdcp_enable(struct intel_connector *connector, u8 content_type);
int intel_hdcp_disable(struct intel_connector *connector);
bool is_hdcp_supported(struct drm_i915_private *dev_priv, enum port port);
bool intel_hdcp_capable(struct intel_connector *connector);
diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.c b/drivers/gpu/drm/i915/display/intel_hdmi.c
index 0ebec69bbbfc..e02f0faecf02 100644
--- a/drivers/gpu/drm/i915/display/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/display/intel_hdmi.c
@@ -45,17 +45,17 @@
#include "intel_audio.h"
#include "intel_connector.h"
#include "intel_ddi.h"
+#include "intel_display_types.h"
#include "intel_dp.h"
#include "intel_dpio_phy.h"
-#include "intel_drv.h"
#include "intel_fifo_underrun.h"
#include "intel_gmbus.h"
#include "intel_hdcp.h"
#include "intel_hdmi.h"
#include "intel_hotplug.h"
#include "intel_lspcon.h"
-#include "intel_sdvo.h"
#include "intel_panel.h"
+#include "intel_sdvo.h"
#include "intel_sideband.h"
static struct drm_device *intel_hdmi_to_dev(struct intel_hdmi *intel_hdmi)
@@ -1514,29 +1514,28 @@ bool intel_hdmi_hdcp_check_link(struct intel_digital_port *intel_dig_port)
return true;
}
-static struct hdcp2_hdmi_msg_data {
+struct hdcp2_hdmi_msg_data {
u8 msg_id;
u32 timeout;
u32 timeout2;
- } hdcp2_msg_data[] = {
- {HDCP_2_2_AKE_INIT, 0, 0},
- {HDCP_2_2_AKE_SEND_CERT, HDCP_2_2_CERT_TIMEOUT_MS, 0},
- {HDCP_2_2_AKE_NO_STORED_KM, 0, 0},
- {HDCP_2_2_AKE_STORED_KM, 0, 0},
- {HDCP_2_2_AKE_SEND_HPRIME, HDCP_2_2_HPRIME_PAIRED_TIMEOUT_MS,
- HDCP_2_2_HPRIME_NO_PAIRED_TIMEOUT_MS},
- {HDCP_2_2_AKE_SEND_PAIRING_INFO, HDCP_2_2_PAIRING_TIMEOUT_MS,
- 0},
- {HDCP_2_2_LC_INIT, 0, 0},
- {HDCP_2_2_LC_SEND_LPRIME, HDCP_2_2_HDMI_LPRIME_TIMEOUT_MS, 0},
- {HDCP_2_2_SKE_SEND_EKS, 0, 0},
- {HDCP_2_2_REP_SEND_RECVID_LIST,
- HDCP_2_2_RECVID_LIST_TIMEOUT_MS, 0},
- {HDCP_2_2_REP_SEND_ACK, 0, 0},
- {HDCP_2_2_REP_STREAM_MANAGE, 0, 0},
- {HDCP_2_2_REP_STREAM_READY, HDCP_2_2_STREAM_READY_TIMEOUT_MS,
- 0},
- };
+};
+
+static const struct hdcp2_hdmi_msg_data hdcp2_msg_data[] = {
+ { HDCP_2_2_AKE_INIT, 0, 0 },
+ { HDCP_2_2_AKE_SEND_CERT, HDCP_2_2_CERT_TIMEOUT_MS, 0 },
+ { HDCP_2_2_AKE_NO_STORED_KM, 0, 0 },
+ { HDCP_2_2_AKE_STORED_KM, 0, 0 },
+ { HDCP_2_2_AKE_SEND_HPRIME, HDCP_2_2_HPRIME_PAIRED_TIMEOUT_MS,
+ HDCP_2_2_HPRIME_NO_PAIRED_TIMEOUT_MS },
+ { HDCP_2_2_AKE_SEND_PAIRING_INFO, HDCP_2_2_PAIRING_TIMEOUT_MS, 0 },
+ { HDCP_2_2_LC_INIT, 0, 0 },
+ { HDCP_2_2_LC_SEND_LPRIME, HDCP_2_2_HDMI_LPRIME_TIMEOUT_MS, 0 },
+ { HDCP_2_2_SKE_SEND_EKS, 0, 0 },
+ { HDCP_2_2_REP_SEND_RECVID_LIST, HDCP_2_2_RECVID_LIST_TIMEOUT_MS, 0 },
+ { HDCP_2_2_REP_SEND_ACK, 0, 0 },
+ { HDCP_2_2_REP_STREAM_MANAGE, 0, 0 },
+ { HDCP_2_2_REP_STREAM_READY, HDCP_2_2_STREAM_READY_TIMEOUT_MS, 0 },
+};
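[Editor's note: a sketch of how the constified timeout table is meant to be consulted; the real lookup helper is outside this hunk, so the function below is an assumption about its shape:

static u32 example_hdcp2_msg_timeout(u8 msg_id, bool paired)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(hdcp2_msg_data); i++) {
		if (hdcp2_msg_data[i].msg_id != msg_id)
			continue;
		/* timeout2, when set, applies to the un-paired case. */
		if (!paired && hdcp2_msg_data[i].timeout2)
			return hdcp2_msg_data[i].timeout2;
		return hdcp2_msg_data[i].timeout;
	}

	return 0; /* unknown message: no mandated timeout */
}
]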
static
int intel_hdmi_hdcp2_read_rx_status(struct intel_digital_port *intel_dig_port,
@@ -2930,51 +2929,34 @@ static u8 cnp_port_to_ddc_pin(struct drm_i915_private *dev_priv,
static u8 icl_port_to_ddc_pin(struct drm_i915_private *dev_priv, enum port port)
{
- u8 ddc_pin;
+ enum phy phy = intel_port_to_phy(dev_priv, port);
- switch (port) {
- case PORT_A:
- ddc_pin = GMBUS_PIN_1_BXT;
- break;
- case PORT_B:
- ddc_pin = GMBUS_PIN_2_BXT;
- break;
- case PORT_C:
- ddc_pin = GMBUS_PIN_9_TC1_ICP;
- break;
- case PORT_D:
- ddc_pin = GMBUS_PIN_10_TC2_ICP;
- break;
- case PORT_E:
- ddc_pin = GMBUS_PIN_11_TC3_ICP;
- break;
- case PORT_F:
- ddc_pin = GMBUS_PIN_12_TC4_ICP;
- break;
- default:
- MISSING_CASE(port);
- ddc_pin = GMBUS_PIN_2_BXT;
- break;
- }
- return ddc_pin;
+ if (intel_phy_is_combo(dev_priv, phy))
+ return GMBUS_PIN_1_BXT + port;
+ else if (intel_phy_is_tc(dev_priv, phy))
+ return GMBUS_PIN_9_TC1_ICP + intel_port_to_tc(dev_priv, port);
+
+ WARN(1, "Unknown port:%c\n", port_name(port));
+ return GMBUS_PIN_2_BXT;
}
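[Editor's note: a worked example of the arithmetic above, assuming the ICL pin layout from gmbus_pins_icp[]:

/*
 * Combo PHY port B: GMBUS_PIN_1_BXT + PORT_B = pin 2 ("dpb").
 * TC port D (= TC2): GMBUS_PIN_9_TC1_ICP + 1 = pin 10 ("tc2").
 */
]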
static u8 mcc_port_to_ddc_pin(struct drm_i915_private *dev_priv, enum port port)
{
+ enum phy phy = intel_port_to_phy(dev_priv, port);
u8 ddc_pin;
- switch (port) {
- case PORT_A:
+ switch (phy) {
+ case PHY_A:
ddc_pin = GMBUS_PIN_1_BXT;
break;
- case PORT_B:
+ case PHY_B:
ddc_pin = GMBUS_PIN_2_BXT;
break;
- case PORT_C:
+ case PHY_C:
ddc_pin = GMBUS_PIN_9_TC1_ICP;
break;
default:
- MISSING_CASE(port);
+ MISSING_CASE(phy);
ddc_pin = GMBUS_PIN_1_BXT;
break;
}
@@ -3019,7 +3001,7 @@ static u8 intel_hdmi_ddc_pin(struct drm_i915_private *dev_priv,
if (HAS_PCH_MCC(dev_priv))
ddc_pin = mcc_port_to_ddc_pin(dev_priv, port);
- else if (HAS_PCH_ICP(dev_priv))
+ else if (HAS_PCH_TGP(dev_priv) || HAS_PCH_ICP(dev_priv))
ddc_pin = icl_port_to_ddc_pin(dev_priv, port);
else if (HAS_PCH_CNP(dev_priv))
ddc_pin = cnp_port_to_ddc_pin(dev_priv, port);
@@ -3143,6 +3125,32 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
DRM_DEBUG_KMS("CEC notifier get failed\n");
}
+static enum intel_hotplug_state
+intel_hdmi_hotplug(struct intel_encoder *encoder,
+ struct intel_connector *connector, bool irq_received)
+{
+ enum intel_hotplug_state state;
+
+ state = intel_encoder_hotplug(encoder, connector, irq_received);
+
+ /*
+ * On many platforms the HDMI live state signal is known to be
+ * unreliable, so we can't use it to detect if a sink is connected or
+ * not. Instead we detect if it's connected based on whether we can
+ * read the EDID or not. That in turn has a problem during disconnect,
+ * since the HPD interrupt may be raised before the DDC lines get
+ * disconnected (due to how the required lengths of the DDC vs. HPD
+ * connector pins are specified) and so we'll still be able to get a
+ * valid EDID. To solve this schedule another detection cycle if this
+ * time around we didn't detect any change in the sink's connection
+ * status.
+ */
+ if (state == INTEL_HOTPLUG_UNCHANGED && irq_received)
+ state = INTEL_HOTPLUG_RETRY;
+
+ return state;
+}
+
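/*
 * Editor's note -- how the RETRY result is consumed (all taken from the
 * intel_hotplug.c hunks below): i915_hotplug_work_func() collects
 * INTEL_HOTPLUG_RETRY returns into hotplug.retry_bits and re-queues
 * itself as delayed work after HPD_RETRY_DELAY (1s), so a disconnect
 * that still produced a valid EDID gets detected on the second pass.
 */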
void intel_hdmi_init(struct drm_i915_private *dev_priv,
i915_reg_t hdmi_reg, enum port port)
{
@@ -3166,7 +3174,7 @@ void intel_hdmi_init(struct drm_i915_private *dev_priv,
&intel_hdmi_enc_funcs, DRM_MODE_ENCODER_TMDS,
"HDMI %c", port_name(port));
- intel_encoder->hotplug = intel_encoder_hotplug;
+ intel_encoder->hotplug = intel_hdmi_hotplug;
intel_encoder->compute_config = intel_hdmi_compute_config;
if (HAS_PCH_SPLIT(dev_priv)) {
intel_encoder->disable = pch_disable_hdmi;
diff --git a/drivers/gpu/drm/i915/display/intel_hotplug.c b/drivers/gpu/drm/i915/display/intel_hotplug.c
index ea3de4acc850..56be20f6f47e 100644
--- a/drivers/gpu/drm/i915/display/intel_hotplug.c
+++ b/drivers/gpu/drm/i915/display/intel_hotplug.c
@@ -26,7 +26,7 @@
#include <drm/i915_drm.h>
#include "i915_drv.h"
-#include "intel_drv.h"
+#include "intel_display_types.h"
#include "intel_hotplug.h"
/**
@@ -104,6 +104,12 @@ enum hpd_pin intel_hpd_pin_default(struct drm_i915_private *dev_priv,
if (IS_CNL_WITH_PORT_F(dev_priv))
return HPD_PORT_E;
return HPD_PORT_F;
+ case PORT_G:
+ return HPD_PORT_G;
+ case PORT_H:
+ return HPD_PORT_H;
+ case PORT_I:
+ return HPD_PORT_I;
default:
MISSING_CASE(port);
return HPD_NONE;
@@ -112,6 +118,7 @@ enum hpd_pin intel_hpd_pin_default(struct drm_i915_private *dev_priv,
#define HPD_STORM_DETECT_PERIOD 1000
#define HPD_STORM_REENABLE_DELAY (2 * 60 * 1000)
+#define HPD_RETRY_DELAY 1000
/**
* intel_hpd_irq_storm_detect - gather stats and detect HPD IRQ storm on a pin
@@ -266,8 +273,10 @@ static void intel_hpd_irq_storm_reenable_work(struct work_struct *work)
intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
}
-bool intel_encoder_hotplug(struct intel_encoder *encoder,
- struct intel_connector *connector)
+enum intel_hotplug_state
+intel_encoder_hotplug(struct intel_encoder *encoder,
+ struct intel_connector *connector,
+ bool irq_received)
{
struct drm_device *dev = connector->base.dev;
enum drm_connector_status old_status;
@@ -279,7 +288,7 @@ bool intel_encoder_hotplug(struct intel_encoder *encoder,
drm_helper_probe_detect(&connector->base, NULL, false);
if (old_status == connector->base.status)
- return false;
+ return INTEL_HOTPLUG_UNCHANGED;
DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n",
connector->base.base.id,
@@ -287,7 +296,7 @@ bool intel_encoder_hotplug(struct intel_encoder *encoder,
drm_get_connector_status_name(old_status),
drm_get_connector_status_name(connector->base.status));
- return true;
+ return INTEL_HOTPLUG_CHANGED;
}
static bool intel_encoder_has_hpd_pulse(struct intel_encoder *encoder)
@@ -339,7 +348,7 @@ static void i915_digport_work_func(struct work_struct *work)
spin_lock_irq(&dev_priv->irq_lock);
dev_priv->hotplug.event_bits |= old_bits;
spin_unlock_irq(&dev_priv->irq_lock);
- schedule_work(&dev_priv->hotplug.hotplug_work);
+ queue_delayed_work(system_wq, &dev_priv->hotplug.hotplug_work, 0);
}
}
@@ -349,14 +358,16 @@ static void i915_digport_work_func(struct work_struct *work)
static void i915_hotplug_work_func(struct work_struct *work)
{
struct drm_i915_private *dev_priv =
- container_of(work, struct drm_i915_private, hotplug.hotplug_work);
+ container_of(work, struct drm_i915_private,
+ hotplug.hotplug_work.work);
struct drm_device *dev = &dev_priv->drm;
struct intel_connector *intel_connector;
struct intel_encoder *intel_encoder;
struct drm_connector *connector;
struct drm_connector_list_iter conn_iter;
- bool changed = false;
+ u32 changed = 0, retry = 0;
u32 hpd_event_bits;
+ u32 hpd_retry_bits;
mutex_lock(&dev->mode_config.mutex);
DRM_DEBUG_KMS("running encoder hotplug functions\n");
@@ -365,6 +376,8 @@ static void i915_hotplug_work_func(struct work_struct *work)
hpd_event_bits = dev_priv->hotplug.event_bits;
dev_priv->hotplug.event_bits = 0;
+ hpd_retry_bits = dev_priv->hotplug.retry_bits;
+ dev_priv->hotplug.retry_bits = 0;
/* Enable polling for connectors which had HPD IRQ storms */
intel_hpd_irq_storm_switch_to_polling(dev_priv);
@@ -373,16 +386,29 @@ static void i915_hotplug_work_func(struct work_struct *work)
drm_connector_list_iter_begin(dev, &conn_iter);
drm_for_each_connector_iter(connector, &conn_iter) {
+ u32 hpd_bit;
+
intel_connector = to_intel_connector(connector);
if (!intel_connector->encoder)
continue;
intel_encoder = intel_connector->encoder;
- if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
+ hpd_bit = BIT(intel_encoder->hpd_pin);
+ if ((hpd_event_bits | hpd_retry_bits) & hpd_bit) {
DRM_DEBUG_KMS("Connector %s (pin %i) received hotplug event.\n",
connector->name, intel_encoder->hpd_pin);
- changed |= intel_encoder->hotplug(intel_encoder,
- intel_connector);
+ switch (intel_encoder->hotplug(intel_encoder,
+ intel_connector,
+ hpd_event_bits & hpd_bit)) {
+ case INTEL_HOTPLUG_UNCHANGED:
+ break;
+ case INTEL_HOTPLUG_CHANGED:
+ changed |= hpd_bit;
+ break;
+ case INTEL_HOTPLUG_RETRY:
+ retry |= hpd_bit;
+ break;
+ }
}
}
drm_connector_list_iter_end(&conn_iter);
@@ -390,6 +416,17 @@ static void i915_hotplug_work_func(struct work_struct *work)
if (changed)
drm_kms_helper_hotplug_event(dev);
+
+ /* Remove shared HPD pins that have changed */
+ retry &= ~changed;
+ if (retry) {
+ spin_lock_irq(&dev_priv->irq_lock);
+ dev_priv->hotplug.retry_bits |= retry;
+ spin_unlock_irq(&dev_priv->irq_lock);
+
+ mod_delayed_work(system_wq, &dev_priv->hotplug.hotplug_work,
+ msecs_to_jiffies(HPD_RETRY_DELAY));
+ }
}
@@ -516,7 +553,7 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
if (queue_dig)
queue_work(dev_priv->hotplug.dp_wq, &dev_priv->hotplug.dig_port_work);
if (queue_hp)
- schedule_work(&dev_priv->hotplug.hotplug_work);
+ queue_delayed_work(system_wq, &dev_priv->hotplug.hotplug_work, 0);
}
/**
@@ -636,7 +673,8 @@ void intel_hpd_poll_init(struct drm_i915_private *dev_priv)
void intel_hpd_init_work(struct drm_i915_private *dev_priv)
{
- INIT_WORK(&dev_priv->hotplug.hotplug_work, i915_hotplug_work_func);
+ INIT_DELAYED_WORK(&dev_priv->hotplug.hotplug_work,
+ i915_hotplug_work_func);
INIT_WORK(&dev_priv->hotplug.dig_port_work, i915_digport_work_func);
INIT_WORK(&dev_priv->hotplug.poll_init_work, i915_hpd_poll_init_work);
INIT_DELAYED_WORK(&dev_priv->hotplug.reenable_work,
@@ -650,11 +688,12 @@ void intel_hpd_cancel_work(struct drm_i915_private *dev_priv)
dev_priv->hotplug.long_port_mask = 0;
dev_priv->hotplug.short_port_mask = 0;
dev_priv->hotplug.event_bits = 0;
+ dev_priv->hotplug.retry_bits = 0;
spin_unlock_irq(&dev_priv->irq_lock);
cancel_work_sync(&dev_priv->hotplug.dig_port_work);
- cancel_work_sync(&dev_priv->hotplug.hotplug_work);
+ cancel_delayed_work_sync(&dev_priv->hotplug.hotplug_work);
cancel_work_sync(&dev_priv->hotplug.poll_init_work);
cancel_delayed_work_sync(&dev_priv->hotplug.reenable_work);
}
diff --git a/drivers/gpu/drm/i915/display/intel_hotplug.h b/drivers/gpu/drm/i915/display/intel_hotplug.h
index 805f897dbb7a..b0cd447b7fbc 100644
--- a/drivers/gpu/drm/i915/display/intel_hotplug.h
+++ b/drivers/gpu/drm/i915/display/intel_hotplug.h
@@ -15,8 +15,9 @@ struct intel_connector;
struct intel_encoder;
void intel_hpd_poll_init(struct drm_i915_private *dev_priv);
-bool intel_encoder_hotplug(struct intel_encoder *encoder,
- struct intel_connector *connector);
+enum intel_hotplug_state intel_encoder_hotplug(struct intel_encoder *encoder,
+ struct intel_connector *connector,
+ bool irq_received);
void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
u32 pin_mask, u32 long_mask);
void intel_hpd_init(struct drm_i915_private *dev_priv);
diff --git a/drivers/gpu/drm/i915/display/intel_lspcon.c b/drivers/gpu/drm/i915/display/intel_lspcon.c
index 7028d0cf3bb1..f8f1308643a9 100644
--- a/drivers/gpu/drm/i915/display/intel_lspcon.c
+++ b/drivers/gpu/drm/i915/display/intel_lspcon.c
@@ -27,8 +27,8 @@
#include <drm/drm_dp_dual_mode_helper.h>
#include <drm/drm_edid.h>
+#include "intel_display_types.h"
#include "intel_dp.h"
-#include "intel_drv.h"
#include "intel_lspcon.h"
/* LSPCON OUI Vendor ID(signatures) */
diff --git a/drivers/gpu/drm/i915/display/intel_lvds.c b/drivers/gpu/drm/i915/display/intel_lvds.c
index efefed62a7f8..b7c459a8931c 100644
--- a/drivers/gpu/drm/i915/display/intel_lvds.c
+++ b/drivers/gpu/drm/i915/display/intel_lvds.c
@@ -42,7 +42,7 @@
#include "i915_drv.h"
#include "intel_atomic.h"
#include "intel_connector.h"
-#include "intel_drv.h"
+#include "intel_display_types.h"
#include "intel_gmbus.h"
#include "intel_lvds.h"
#include "intel_panel.h"
@@ -318,8 +318,7 @@ static void intel_enable_lvds(struct intel_encoder *encoder,
I915_WRITE(PP_CONTROL(0), I915_READ(PP_CONTROL(0)) | PANEL_POWER_ON);
POSTING_READ(lvds_encoder->reg);
- if (intel_wait_for_register(&dev_priv->uncore,
- PP_STATUS(0), PP_ON, PP_ON, 5000))
+ if (intel_de_wait_for_set(dev_priv, PP_STATUS(0), PP_ON, 5000))
DRM_ERROR("timed out waiting for panel to power on\n");
intel_panel_enable_backlight(pipe_config, conn_state);
@@ -333,8 +332,7 @@ static void intel_disable_lvds(struct intel_encoder *encoder,
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
I915_WRITE(PP_CONTROL(0), I915_READ(PP_CONTROL(0)) & ~PANEL_POWER_ON);
- if (intel_wait_for_register(&dev_priv->uncore,
- PP_STATUS(0), PP_ON, 0, 1000))
+ if (intel_de_wait_for_clear(dev_priv, PP_STATUS(0), PP_ON, 1000))
DRM_ERROR("timed out waiting for panel to power off\n");
I915_WRITE(lvds_encoder->reg, I915_READ(lvds_encoder->reg) & ~LVDS_PORT_EN);
diff --git a/drivers/gpu/drm/i915/display/intel_opregion.c b/drivers/gpu/drm/i915/display/intel_opregion.c
index 824881271351..969ade623691 100644
--- a/drivers/gpu/drm/i915/display/intel_opregion.c
+++ b/drivers/gpu/drm/i915/display/intel_opregion.c
@@ -35,7 +35,7 @@
#include "display/intel_panel.h"
#include "i915_drv.h"
-#include "intel_drv.h"
+#include "intel_display_types.h"
#include "intel_opregion.h"
#define OPREGION_HEADER_OFFSET 0
diff --git a/drivers/gpu/drm/i915/display/intel_overlay.c b/drivers/gpu/drm/i915/display/intel_overlay.c
index 21339b7f6a3e..29edfc343716 100644
--- a/drivers/gpu/drm/i915/display/intel_overlay.c
+++ b/drivers/gpu/drm/i915/display/intel_overlay.c
@@ -33,7 +33,7 @@
#include "i915_drv.h"
#include "i915_reg.h"
-#include "intel_drv.h"
+#include "intel_display_types.h"
#include "intel_frontbuffer.h"
#include "intel_overlay.h"
@@ -175,6 +175,7 @@ struct overlay_registers {
struct intel_overlay {
struct drm_i915_private *i915;
+ struct intel_context *context;
struct intel_crtc *crtc;
struct i915_vma *vma;
struct i915_vma *old_vma;
@@ -190,7 +191,8 @@ struct intel_overlay {
struct overlay_registers __iomem *regs;
u32 flip_addr;
/* flip handling */
- struct i915_active_request last_flip;
+ struct i915_active last_flip;
+ void (*flip_complete)(struct intel_overlay *ovl);
};
static void i830_overlay_clock_gating(struct drm_i915_private *dev_priv,
@@ -216,32 +218,25 @@ static void i830_overlay_clock_gating(struct drm_i915_private *dev_priv,
PCI_DEVFN(0, 0), I830_CLOCK_GATE, val);
}
-static void intel_overlay_submit_request(struct intel_overlay *overlay,
- struct i915_request *rq,
- i915_active_retire_fn retire)
+static struct i915_request *
+alloc_request(struct intel_overlay *overlay, void (*fn)(struct intel_overlay *))
{
- GEM_BUG_ON(i915_active_request_peek(&overlay->last_flip,
- &overlay->i915->drm.struct_mutex));
- i915_active_request_set_retire_fn(&overlay->last_flip, retire,
- &overlay->i915->drm.struct_mutex);
- __i915_active_request_set(&overlay->last_flip, rq);
- i915_request_add(rq);
-}
+ struct i915_request *rq;
+ int err;
-static int intel_overlay_do_wait_request(struct intel_overlay *overlay,
- struct i915_request *rq,
- i915_active_retire_fn retire)
-{
- intel_overlay_submit_request(overlay, rq, retire);
- return i915_active_request_retire(&overlay->last_flip,
- &overlay->i915->drm.struct_mutex);
-}
+ overlay->flip_complete = fn;
-static struct i915_request *alloc_request(struct intel_overlay *overlay)
-{
- struct intel_engine_cs *engine = overlay->i915->engine[RCS0];
+ rq = i915_request_create(overlay->context);
+ if (IS_ERR(rq))
+ return rq;
+
+ err = i915_active_ref(&overlay->last_flip, rq->timeline, rq);
+ if (err) {
+ i915_request_add(rq);
+ return ERR_PTR(err);
+ }
- return i915_request_create(engine->kernel_context);
+ return rq;
}
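/*
 * Editor's note -- the request/retire flow after this patch, in sketch
 * form (all names taken from the hunks in this file): alloc_request()
 * records the completion hook and ties the request to the overlay's
 * i915_active, so callers now do
 *
 *	rq = alloc_request(overlay, intel_overlay_off_tail);
 *	... emit ring commands ...
 *	i915_request_add(rq);
 *	return i915_active_wait(&overlay->last_flip);
 *
 * with the hook invoked from intel_overlay_last_flip_retire() further
 * down once the tracked request retires.
 */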
/* overlay needs to be disabled in OCMD reg */
@@ -253,7 +248,7 @@ static int intel_overlay_on(struct intel_overlay *overlay)
WARN_ON(overlay->active);
- rq = alloc_request(overlay);
+ rq = alloc_request(overlay, NULL);
if (IS_ERR(rq))
return PTR_ERR(rq);
@@ -274,7 +269,9 @@ static int intel_overlay_on(struct intel_overlay *overlay)
*cs++ = MI_NOOP;
intel_ring_advance(rq, cs);
- return intel_overlay_do_wait_request(overlay, rq, NULL);
+ i915_request_add(rq);
+
+ return i915_active_wait(&overlay->last_flip);
}
static void intel_overlay_flip_prepare(struct intel_overlay *overlay,
@@ -284,9 +281,9 @@ static void intel_overlay_flip_prepare(struct intel_overlay *overlay,
WARN_ON(overlay->old_vma);
- i915_gem_track_fb(overlay->vma ? overlay->vma->obj : NULL,
- vma ? vma->obj : NULL,
- INTEL_FRONTBUFFER_OVERLAY(pipe));
+ intel_frontbuffer_track(overlay->vma ? overlay->vma->obj->frontbuffer : NULL,
+ vma ? vma->obj->frontbuffer : NULL,
+ INTEL_FRONTBUFFER_OVERLAY(pipe));
intel_frontbuffer_flip_prepare(overlay->i915,
INTEL_FRONTBUFFER_OVERLAY(pipe));
@@ -318,7 +315,7 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
if (tmp & (1 << 17))
DRM_DEBUG("overlay underrun, DOVSTA: %x\n", tmp);
- rq = alloc_request(overlay);
+ rq = alloc_request(overlay, NULL);
if (IS_ERR(rq))
return PTR_ERR(rq);
@@ -333,8 +330,7 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
intel_ring_advance(rq, cs);
intel_overlay_flip_prepare(overlay, vma);
-
- intel_overlay_submit_request(overlay, rq, NULL);
+ i915_request_add(rq);
return 0;
}
@@ -355,20 +351,13 @@ static void intel_overlay_release_old_vma(struct intel_overlay *overlay)
}
static void
-intel_overlay_release_old_vid_tail(struct i915_active_request *active,
- struct i915_request *rq)
+intel_overlay_release_old_vid_tail(struct intel_overlay *overlay)
{
- struct intel_overlay *overlay =
- container_of(active, typeof(*overlay), last_flip);
-
intel_overlay_release_old_vma(overlay);
}
-static void intel_overlay_off_tail(struct i915_active_request *active,
- struct i915_request *rq)
+static void intel_overlay_off_tail(struct intel_overlay *overlay)
{
- struct intel_overlay *overlay =
- container_of(active, typeof(*overlay), last_flip);
struct drm_i915_private *dev_priv = overlay->i915;
intel_overlay_release_old_vma(overlay);
@@ -381,6 +370,16 @@ static void intel_overlay_off_tail(struct i915_active_request *active,
i830_overlay_clock_gating(dev_priv, true);
}
+static void
+intel_overlay_last_flip_retire(struct i915_active *active)
+{
+ struct intel_overlay *overlay =
+ container_of(active, typeof(*overlay), last_flip);
+
+ if (overlay->flip_complete)
+ overlay->flip_complete(overlay);
+}
+
/* overlay needs to be disabled in OCMD reg */
static int intel_overlay_off(struct intel_overlay *overlay)
{
@@ -395,7 +394,7 @@ static int intel_overlay_off(struct intel_overlay *overlay)
* of the hw. Do it in both cases */
flip_addr |= OFC_UPDATE;
- rq = alloc_request(overlay);
+ rq = alloc_request(overlay, intel_overlay_off_tail);
if (IS_ERR(rq))
return PTR_ERR(rq);
@@ -418,17 +417,16 @@ static int intel_overlay_off(struct intel_overlay *overlay)
intel_ring_advance(rq, cs);
intel_overlay_flip_prepare(overlay, NULL);
+ i915_request_add(rq);
- return intel_overlay_do_wait_request(overlay, rq,
- intel_overlay_off_tail);
+ return i915_active_wait(&overlay->last_flip);
}
/* recover from an interruption due to a signal
* We have to be careful not to repeat work forever and make forward progress. */
static int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay)
{
- return i915_active_request_retire(&overlay->last_flip,
- &overlay->i915->drm.struct_mutex);
+ return i915_active_wait(&overlay->last_flip);
}
/* Wait for pending overlay flip and release old frame.
@@ -438,43 +436,40 @@ static int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay)
static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
{
struct drm_i915_private *dev_priv = overlay->i915;
+ struct i915_request *rq;
u32 *cs;
- int ret;
lockdep_assert_held(&dev_priv->drm.struct_mutex);
- /* Only wait if there is actually an old frame to release to
+ /*
+ * Only wait if there is actually an old frame to release to
* guarantee forward progress.
*/
if (!overlay->old_vma)
return 0;
- if (I915_READ(GEN2_ISR) & I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT) {
- /* synchronous slowpath */
- struct i915_request *rq;
+ if (!(I915_READ(GEN2_ISR) & I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT)) {
+ intel_overlay_release_old_vid_tail(overlay);
+ return 0;
+ }
- rq = alloc_request(overlay);
- if (IS_ERR(rq))
- return PTR_ERR(rq);
+ rq = alloc_request(overlay, intel_overlay_release_old_vid_tail);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
- cs = intel_ring_begin(rq, 2);
- if (IS_ERR(cs)) {
- i915_request_add(rq);
- return PTR_ERR(cs);
- }
+ cs = intel_ring_begin(rq, 2);
+ if (IS_ERR(cs)) {
+ i915_request_add(rq);
+ return PTR_ERR(cs);
+ }
- *cs++ = MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP;
- *cs++ = MI_NOOP;
- intel_ring_advance(rq, cs);
+ *cs++ = MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP;
+ *cs++ = MI_NOOP;
+ intel_ring_advance(rq, cs);
- ret = intel_overlay_do_wait_request(overlay, rq,
- intel_overlay_release_old_vid_tail);
- if (ret)
- return ret;
- } else
- intel_overlay_release_old_vid_tail(&overlay->last_flip, NULL);
+ i915_request_add(rq);
- return 0;
+ return i915_active_wait(&overlay->last_flip);
}
void intel_overlay_reset(struct drm_i915_private *dev_priv)
@@ -773,11 +768,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
ret = PTR_ERR(vma);
goto out_pin_section;
}
- intel_fb_obj_flush(new_bo, ORIGIN_DIRTYFB);
-
- ret = i915_vma_put_fence(vma);
- if (ret)
- goto out_unpin;
+ intel_frontbuffer_flush(new_bo->frontbuffer, ORIGIN_DIRTYFB);
if (!overlay->active) {
u32 oconfig;
@@ -1359,11 +1350,16 @@ void intel_overlay_setup(struct drm_i915_private *dev_priv)
if (!HAS_OVERLAY(dev_priv))
return;
+ if (!HAS_ENGINE(dev_priv, RCS0))
+ return;
+
overlay = kzalloc(sizeof(*overlay), GFP_KERNEL);
if (!overlay)
return;
overlay->i915 = dev_priv;
+ overlay->context = dev_priv->engine[RCS0]->kernel_context;
+ GEM_BUG_ON(!overlay->context);
overlay->color_key = 0x0101fe;
overlay->color_key_enabled = true;
@@ -1371,7 +1367,9 @@ void intel_overlay_setup(struct drm_i915_private *dev_priv)
overlay->contrast = 75;
overlay->saturation = 146;
- INIT_ACTIVE_REQUEST(&overlay->last_flip);
+ i915_active_init(dev_priv,
+ &overlay->last_flip,
+ NULL, intel_overlay_last_flip_retire);
ret = get_registers(overlay, OVERLAY_NEEDS_PHYSICAL(dev_priv));
if (ret)
@@ -1405,6 +1403,7 @@ void intel_overlay_cleanup(struct drm_i915_private *dev_priv)
WARN_ON(overlay->active);
i915_gem_object_put(overlay->reg_bo);
+ i915_active_fini(&overlay->last_flip);
kfree(overlay);
}
diff --git a/drivers/gpu/drm/i915/display/intel_panel.c b/drivers/gpu/drm/i915/display/intel_panel.c
index 39d742094065..bc14e9c0285a 100644
--- a/drivers/gpu/drm/i915/display/intel_panel.c
+++ b/drivers/gpu/drm/i915/display/intel_panel.c
@@ -35,8 +35,8 @@
#include <linux/pwm.h>
#include "intel_connector.h"
+#include "intel_display_types.h"
#include "intel_dp_aux_backlight.h"
-#include "intel_drv.h"
#include "intel_dsi_dcs_backlight.h"
#include "intel_panel.h"
diff --git a/drivers/gpu/drm/i915/display/intel_pipe_crc.c b/drivers/gpu/drm/i915/display/intel_pipe_crc.c
index 1e2c4307d05a..6260a2082719 100644
--- a/drivers/gpu/drm/i915/display/intel_pipe_crc.c
+++ b/drivers/gpu/drm/i915/display/intel_pipe_crc.c
@@ -30,7 +30,7 @@
#include <linux/seq_file.h>
#include "intel_atomic.h"
-#include "intel_drv.h"
+#include "intel_display_types.h"
#include "intel_pipe_crc.h"
static const char * const pipe_crc_sources[] = {
@@ -667,5 +667,5 @@ void intel_crtc_disable_pipe_crc(struct intel_crtc *intel_crtc)
I915_WRITE(PIPE_CRC_CTL(crtc->index), 0);
POSTING_READ(PIPE_CRC_CTL(crtc->index));
- synchronize_irq(dev_priv->drm.irq);
+ intel_synchronize_irq(dev_priv);
}
diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c
index 69d908e6a050..3bfb720560c2 100644
--- a/drivers/gpu/drm/i915/display/intel_psr.c
+++ b/drivers/gpu/drm/i915/display/intel_psr.c
@@ -26,7 +26,7 @@
#include "display/intel_dp.h"
#include "i915_drv.h"
-#include "intel_drv.h"
+#include "intel_display_types.h"
#include "intel_psr.h"
#include "intel_sprite.h"
@@ -825,8 +825,8 @@ static void intel_psr_disable_locked(struct intel_dp *intel_dp)
}
/* Wait till PSR is idle */
- if (intel_wait_for_register(&dev_priv->uncore,
- psr_status, psr_status_mask, 0, 2000))
+ if (intel_de_wait_for_clear(dev_priv, psr_status,
+ psr_status_mask, 2000))
DRM_ERROR("Timed out waiting PSR idle state\n");
/* Disable PSR on Sink */
@@ -988,7 +988,7 @@ static bool __psr_wait_for_idle_locked(struct drm_i915_private *dev_priv)
mutex_unlock(&dev_priv->psr.lock);
- err = intel_wait_for_register(&dev_priv->uncore, reg, mask, 0, 50);
+ err = intel_de_wait_for_clear(dev_priv, reg, mask, 50);
if (err)
DRM_ERROR("Timed out waiting for PSR Idle for re-enable\n");
diff --git a/drivers/gpu/drm/i915/display/intel_quirks.c b/drivers/gpu/drm/i915/display/intel_quirks.c
index 0b749c28541f..399b1542509f 100644
--- a/drivers/gpu/drm/i915/display/intel_quirks.c
+++ b/drivers/gpu/drm/i915/display/intel_quirks.c
@@ -5,7 +5,7 @@
#include <linux/dmi.h>
-#include "intel_drv.h"
+#include "intel_display_types.h"
#include "intel_quirks.h"
/*
diff --git a/drivers/gpu/drm/i915/display/intel_sdvo.c b/drivers/gpu/drm/i915/display/intel_sdvo.c
index ceda03e5a3d4..adeb1c840976 100644
--- a/drivers/gpu/drm/i915/display/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/display/intel_sdvo.c
@@ -39,7 +39,7 @@
#include "i915_drv.h"
#include "intel_atomic.h"
#include "intel_connector.h"
-#include "intel_drv.h"
+#include "intel_display_types.h"
#include "intel_fifo_underrun.h"
#include "intel_gmbus.h"
#include "intel_hdmi.h"
@@ -274,130 +274,145 @@ static bool intel_sdvo_read_byte(struct intel_sdvo *intel_sdvo, u8 addr, u8 *ch)
return false;
}
-#define SDVO_CMD_NAME_ENTRY(cmd) {cmd, #cmd}
+#define SDVO_CMD_NAME_ENTRY(cmd_) { .cmd = SDVO_CMD_ ## cmd_, .name = #cmd_ }
+
/** Mapping of command numbers to names, for debug output */
-static const struct _sdvo_cmd_name {
+static const struct {
u8 cmd;
const char *name;
} __attribute__ ((packed)) sdvo_cmd_names[] = {
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_RESET),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_DEVICE_CAPS),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FIRMWARE_REV),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TRAINED_INPUTS),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ACTIVE_OUTPUTS),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ACTIVE_OUTPUTS),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_IN_OUT_MAP),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_IN_OUT_MAP),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ATTACHED_DISPLAYS),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HOT_PLUG_SUPPORT),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ACTIVE_HOT_PLUG),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ACTIVE_HOT_PLUG),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INTERRUPT_EVENT_SOURCE),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TARGET_INPUT),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TARGET_OUTPUT),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INPUT_TIMINGS_PART1),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INPUT_TIMINGS_PART2),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_INPUT_TIMINGS_PART1),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_INPUT_TIMINGS_PART2),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_INPUT_TIMINGS_PART1),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OUTPUT_TIMINGS_PART1),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OUTPUT_TIMINGS_PART2),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OUTPUT_TIMINGS_PART1),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OUTPUT_TIMINGS_PART2),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OUTPUT_PIXEL_CLOCK_RANGE),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_CLOCK_RATE_MULTS),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_CLOCK_RATE_MULT),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_CLOCK_RATE_MULT),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_TV_FORMATS),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TV_FORMAT),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TV_FORMAT),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_POWER_STATES),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_POWER_STATE),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ENCODER_POWER_STATE),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_DISPLAY_POWER_STATE),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_CONTROL_BUS_SWITCH),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SCALED_HDTV_RESOLUTION_SUPPORT),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS),
+ SDVO_CMD_NAME_ENTRY(RESET),
+ SDVO_CMD_NAME_ENTRY(GET_DEVICE_CAPS),
+ SDVO_CMD_NAME_ENTRY(GET_FIRMWARE_REV),
+ SDVO_CMD_NAME_ENTRY(GET_TRAINED_INPUTS),
+ SDVO_CMD_NAME_ENTRY(GET_ACTIVE_OUTPUTS),
+ SDVO_CMD_NAME_ENTRY(SET_ACTIVE_OUTPUTS),
+ SDVO_CMD_NAME_ENTRY(GET_IN_OUT_MAP),
+ SDVO_CMD_NAME_ENTRY(SET_IN_OUT_MAP),
+ SDVO_CMD_NAME_ENTRY(GET_ATTACHED_DISPLAYS),
+ SDVO_CMD_NAME_ENTRY(GET_HOT_PLUG_SUPPORT),
+ SDVO_CMD_NAME_ENTRY(SET_ACTIVE_HOT_PLUG),
+ SDVO_CMD_NAME_ENTRY(GET_ACTIVE_HOT_PLUG),
+ SDVO_CMD_NAME_ENTRY(GET_INTERRUPT_EVENT_SOURCE),
+ SDVO_CMD_NAME_ENTRY(SET_TARGET_INPUT),
+ SDVO_CMD_NAME_ENTRY(SET_TARGET_OUTPUT),
+ SDVO_CMD_NAME_ENTRY(GET_INPUT_TIMINGS_PART1),
+ SDVO_CMD_NAME_ENTRY(GET_INPUT_TIMINGS_PART2),
+ SDVO_CMD_NAME_ENTRY(SET_INPUT_TIMINGS_PART1),
+ SDVO_CMD_NAME_ENTRY(SET_INPUT_TIMINGS_PART2),
+ SDVO_CMD_NAME_ENTRY(SET_OUTPUT_TIMINGS_PART1),
+ SDVO_CMD_NAME_ENTRY(SET_OUTPUT_TIMINGS_PART2),
+ SDVO_CMD_NAME_ENTRY(GET_OUTPUT_TIMINGS_PART1),
+ SDVO_CMD_NAME_ENTRY(GET_OUTPUT_TIMINGS_PART2),
+ SDVO_CMD_NAME_ENTRY(CREATE_PREFERRED_INPUT_TIMING),
+ SDVO_CMD_NAME_ENTRY(GET_PREFERRED_INPUT_TIMING_PART1),
+ SDVO_CMD_NAME_ENTRY(GET_PREFERRED_INPUT_TIMING_PART2),
+ SDVO_CMD_NAME_ENTRY(GET_INPUT_PIXEL_CLOCK_RANGE),
+ SDVO_CMD_NAME_ENTRY(GET_OUTPUT_PIXEL_CLOCK_RANGE),
+ SDVO_CMD_NAME_ENTRY(GET_SUPPORTED_CLOCK_RATE_MULTS),
+ SDVO_CMD_NAME_ENTRY(GET_CLOCK_RATE_MULT),
+ SDVO_CMD_NAME_ENTRY(SET_CLOCK_RATE_MULT),
+ SDVO_CMD_NAME_ENTRY(GET_SUPPORTED_TV_FORMATS),
+ SDVO_CMD_NAME_ENTRY(GET_TV_FORMAT),
+ SDVO_CMD_NAME_ENTRY(SET_TV_FORMAT),
+ SDVO_CMD_NAME_ENTRY(GET_SUPPORTED_POWER_STATES),
+ SDVO_CMD_NAME_ENTRY(GET_POWER_STATE),
+ SDVO_CMD_NAME_ENTRY(SET_ENCODER_POWER_STATE),
+ SDVO_CMD_NAME_ENTRY(SET_DISPLAY_POWER_STATE),
+ SDVO_CMD_NAME_ENTRY(SET_CONTROL_BUS_SWITCH),
+ SDVO_CMD_NAME_ENTRY(GET_SDTV_RESOLUTION_SUPPORT),
+ SDVO_CMD_NAME_ENTRY(GET_SCALED_HDTV_RESOLUTION_SUPPORT),
+ SDVO_CMD_NAME_ENTRY(GET_SUPPORTED_ENHANCEMENTS),
/* Add the op code for SDVO enhancements */
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_HPOS),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HPOS),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HPOS),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_VPOS),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_VPOS),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_VPOS),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_SATURATION),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SATURATION),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_SATURATION),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_HUE),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HUE),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HUE),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_CONTRAST),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_CONTRAST),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_CONTRAST),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_BRIGHTNESS),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_BRIGHTNESS),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_BRIGHTNESS),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_OVERSCAN_H),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OVERSCAN_H),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OVERSCAN_H),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_OVERSCAN_V),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OVERSCAN_V),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OVERSCAN_V),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_FLICKER_FILTER),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FLICKER_FILTER),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_FLICKER_FILTER),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_FLICKER_FILTER_ADAPTIVE),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FLICKER_FILTER_ADAPTIVE),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_FLICKER_FILTER_ADAPTIVE),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_FLICKER_FILTER_2D),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FLICKER_FILTER_2D),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_FLICKER_FILTER_2D),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_SHARPNESS),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SHARPNESS),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_SHARPNESS),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_DOT_CRAWL),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_DOT_CRAWL),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_TV_CHROMA_FILTER),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TV_CHROMA_FILTER),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TV_CHROMA_FILTER),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_TV_LUMA_FILTER),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TV_LUMA_FILTER),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TV_LUMA_FILTER),
+ SDVO_CMD_NAME_ENTRY(GET_MAX_HPOS),
+ SDVO_CMD_NAME_ENTRY(GET_HPOS),
+ SDVO_CMD_NAME_ENTRY(SET_HPOS),
+ SDVO_CMD_NAME_ENTRY(GET_MAX_VPOS),
+ SDVO_CMD_NAME_ENTRY(GET_VPOS),
+ SDVO_CMD_NAME_ENTRY(SET_VPOS),
+ SDVO_CMD_NAME_ENTRY(GET_MAX_SATURATION),
+ SDVO_CMD_NAME_ENTRY(GET_SATURATION),
+ SDVO_CMD_NAME_ENTRY(SET_SATURATION),
+ SDVO_CMD_NAME_ENTRY(GET_MAX_HUE),
+ SDVO_CMD_NAME_ENTRY(GET_HUE),
+ SDVO_CMD_NAME_ENTRY(SET_HUE),
+ SDVO_CMD_NAME_ENTRY(GET_MAX_CONTRAST),
+ SDVO_CMD_NAME_ENTRY(GET_CONTRAST),
+ SDVO_CMD_NAME_ENTRY(SET_CONTRAST),
+ SDVO_CMD_NAME_ENTRY(GET_MAX_BRIGHTNESS),
+ SDVO_CMD_NAME_ENTRY(GET_BRIGHTNESS),
+ SDVO_CMD_NAME_ENTRY(SET_BRIGHTNESS),
+ SDVO_CMD_NAME_ENTRY(GET_MAX_OVERSCAN_H),
+ SDVO_CMD_NAME_ENTRY(GET_OVERSCAN_H),
+ SDVO_CMD_NAME_ENTRY(SET_OVERSCAN_H),
+ SDVO_CMD_NAME_ENTRY(GET_MAX_OVERSCAN_V),
+ SDVO_CMD_NAME_ENTRY(GET_OVERSCAN_V),
+ SDVO_CMD_NAME_ENTRY(SET_OVERSCAN_V),
+ SDVO_CMD_NAME_ENTRY(GET_MAX_FLICKER_FILTER),
+ SDVO_CMD_NAME_ENTRY(GET_FLICKER_FILTER),
+ SDVO_CMD_NAME_ENTRY(SET_FLICKER_FILTER),
+ SDVO_CMD_NAME_ENTRY(GET_MAX_FLICKER_FILTER_ADAPTIVE),
+ SDVO_CMD_NAME_ENTRY(GET_FLICKER_FILTER_ADAPTIVE),
+ SDVO_CMD_NAME_ENTRY(SET_FLICKER_FILTER_ADAPTIVE),
+ SDVO_CMD_NAME_ENTRY(GET_MAX_FLICKER_FILTER_2D),
+ SDVO_CMD_NAME_ENTRY(GET_FLICKER_FILTER_2D),
+ SDVO_CMD_NAME_ENTRY(SET_FLICKER_FILTER_2D),
+ SDVO_CMD_NAME_ENTRY(GET_MAX_SHARPNESS),
+ SDVO_CMD_NAME_ENTRY(GET_SHARPNESS),
+ SDVO_CMD_NAME_ENTRY(SET_SHARPNESS),
+ SDVO_CMD_NAME_ENTRY(GET_DOT_CRAWL),
+ SDVO_CMD_NAME_ENTRY(SET_DOT_CRAWL),
+ SDVO_CMD_NAME_ENTRY(GET_MAX_TV_CHROMA_FILTER),
+ SDVO_CMD_NAME_ENTRY(GET_TV_CHROMA_FILTER),
+ SDVO_CMD_NAME_ENTRY(SET_TV_CHROMA_FILTER),
+ SDVO_CMD_NAME_ENTRY(GET_MAX_TV_LUMA_FILTER),
+ SDVO_CMD_NAME_ENTRY(GET_TV_LUMA_FILTER),
+ SDVO_CMD_NAME_ENTRY(SET_TV_LUMA_FILTER),
/* HDMI op code */
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPP_ENCODE),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ENCODE),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ENCODE),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_PIXEL_REPLI),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_PIXEL_REPLI),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_COLORIMETRY_CAP),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_COLORIMETRY),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_COLORIMETRY),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_AUDIO_ENCRYPT_PREFER),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_AUDIO_STAT),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_AUDIO_STAT),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_INDEX),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HBUF_INDEX),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_INFO),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_AV_SPLIT),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HBUF_AV_SPLIT),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_TXRATE),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HBUF_TXRATE),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HBUF_DATA),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_DATA),
+ SDVO_CMD_NAME_ENTRY(GET_SUPP_ENCODE),
+ SDVO_CMD_NAME_ENTRY(GET_ENCODE),
+ SDVO_CMD_NAME_ENTRY(SET_ENCODE),
+ SDVO_CMD_NAME_ENTRY(SET_PIXEL_REPLI),
+ SDVO_CMD_NAME_ENTRY(GET_PIXEL_REPLI),
+ SDVO_CMD_NAME_ENTRY(GET_COLORIMETRY_CAP),
+ SDVO_CMD_NAME_ENTRY(SET_COLORIMETRY),
+ SDVO_CMD_NAME_ENTRY(GET_COLORIMETRY),
+ SDVO_CMD_NAME_ENTRY(GET_AUDIO_ENCRYPT_PREFER),
+ SDVO_CMD_NAME_ENTRY(SET_AUDIO_STAT),
+ SDVO_CMD_NAME_ENTRY(GET_AUDIO_STAT),
+ SDVO_CMD_NAME_ENTRY(GET_HBUF_INDEX),
+ SDVO_CMD_NAME_ENTRY(SET_HBUF_INDEX),
+ SDVO_CMD_NAME_ENTRY(GET_HBUF_INFO),
+ SDVO_CMD_NAME_ENTRY(GET_HBUF_AV_SPLIT),
+ SDVO_CMD_NAME_ENTRY(SET_HBUF_AV_SPLIT),
+ SDVO_CMD_NAME_ENTRY(GET_HBUF_TXRATE),
+ SDVO_CMD_NAME_ENTRY(SET_HBUF_TXRATE),
+ SDVO_CMD_NAME_ENTRY(SET_HBUF_DATA),
+ SDVO_CMD_NAME_ENTRY(GET_HBUF_DATA),
};
+#undef SDVO_CMD_NAME_ENTRY
+
+static const char *sdvo_cmd_name(u8 cmd)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(sdvo_cmd_names); i++) {
+ if (cmd == sdvo_cmd_names[i].cmd)
+ return sdvo_cmd_names[i].name;
+ }
+
+ return NULL;
+}
+
#define SDVO_NAME(svdo) ((svdo)->port == PORT_B ? "SDVOB" : "SDVOC")
static void intel_sdvo_debug_write(struct intel_sdvo *intel_sdvo, u8 cmd,
const void *args, int args_len)
{
+ const char *cmd_name;
int i, pos = 0;
#define BUF_LEN 256
char buffer[BUF_LEN];
@@ -412,15 +427,12 @@ static void intel_sdvo_debug_write(struct intel_sdvo *intel_sdvo, u8 cmd,
for (; i < 8; i++) {
BUF_PRINT(" ");
}
- for (i = 0; i < ARRAY_SIZE(sdvo_cmd_names); i++) {
- if (cmd == sdvo_cmd_names[i].cmd) {
- BUF_PRINT("(%s)", sdvo_cmd_names[i].name);
- break;
- }
- }
- if (i == ARRAY_SIZE(sdvo_cmd_names)) {
+
+ cmd_name = sdvo_cmd_name(cmd);
+ if (cmd_name)
+ BUF_PRINT("(%s)", cmd_name);
+ else
BUF_PRINT("(%02X)", cmd);
- }
BUG_ON(pos >= BUF_LEN - 1);
#undef BUF_PRINT
#undef BUF_LEN
@@ -429,15 +441,23 @@ static void intel_sdvo_debug_write(struct intel_sdvo *intel_sdvo, u8 cmd,
}
static const char * const cmd_status_names[] = {
- "Power on",
- "Success",
- "Not supported",
- "Invalid arg",
- "Pending",
- "Target not specified",
- "Scaling not supported"
+ [SDVO_CMD_STATUS_POWER_ON] = "Power on",
+ [SDVO_CMD_STATUS_SUCCESS] = "Success",
+ [SDVO_CMD_STATUS_NOTSUPP] = "Not supported",
+ [SDVO_CMD_STATUS_INVALID_ARG] = "Invalid arg",
+ [SDVO_CMD_STATUS_PENDING] = "Pending",
+ [SDVO_CMD_STATUS_TARGET_NOT_SPECIFIED] = "Target not specified",
+ [SDVO_CMD_STATUS_SCALING_NOT_SUPP] = "Scaling not supported",
};
+static const char *sdvo_cmd_status(u8 status)
+{
+ if (status < ARRAY_SIZE(cmd_status_names))
+ return cmd_status_names[status];
+ else
+ return NULL;
+}
+
static bool __intel_sdvo_write_cmd(struct intel_sdvo *intel_sdvo, u8 cmd,
const void *args, int args_len,
bool unlocked)
@@ -516,6 +536,7 @@ static bool intel_sdvo_write_cmd(struct intel_sdvo *intel_sdvo, u8 cmd,
static bool intel_sdvo_read_response(struct intel_sdvo *intel_sdvo,
void *response, int response_len)
{
+ const char *cmd_status;
u8 retry = 15; /* 5 quick checks, followed by 10 long checks */
u8 status;
int i, pos = 0;
@@ -562,8 +583,9 @@ static bool intel_sdvo_read_response(struct intel_sdvo *intel_sdvo,
#define BUF_PRINT(args...) \
pos += snprintf(buffer + pos, max_t(int, BUF_LEN - pos, 0), args)
- if (status <= SDVO_CMD_STATUS_SCALING_NOT_SUPP)
- BUF_PRINT("(%s)", cmd_status_names[status]);
+ cmd_status = sdvo_cmd_status(status);
+ if (cmd_status)
+ BUF_PRINT("(%s)", cmd_status);
else
BUF_PRINT("(??? %d)", status);
@@ -929,6 +951,20 @@ static bool intel_sdvo_set_audio_state(struct intel_sdvo *intel_sdvo,
&audio_state, 1);
}
+static bool intel_sdvo_get_hbuf_size(struct intel_sdvo *intel_sdvo,
+ u8 *hbuf_size)
+{
+ if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_HBUF_INFO,
+ hbuf_size, 1))
+ return false;
+
+ /* Buffer size is 0 based, hooray! However zero means zero. */
+ if (*hbuf_size)
+ (*hbuf_size)++;
+
+ return true;
+}
+
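/*
 * Editor's note -- worked example of the 0-based quirk above: a device
 * reporting hbuf_size 0x1f means 0x20 (32) usable bytes, while a
 * reported 0 really does mean 0 bytes, hence the conditional increment.
 */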
#if 0
static void intel_sdvo_dump_hdmi_buf(struct intel_sdvo *intel_sdvo)
{
@@ -972,14 +1008,10 @@ static bool intel_sdvo_write_infoframe(struct intel_sdvo *intel_sdvo,
set_buf_index, 2))
return false;
- if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_HBUF_INFO,
- &hbuf_size, 1))
+ if (!intel_sdvo_get_hbuf_size(intel_sdvo, &hbuf_size))
return false;
- /* Buffer size is 0 based, hooray! */
- hbuf_size++;
-
- DRM_DEBUG_KMS("writing sdvo hbuf: %i, hbuf_size %i, hbuf_size: %i\n",
+ DRM_DEBUG_KMS("writing sdvo hbuf: %i, length %u, hbuf_size: %i\n",
if_index, length, hbuf_size);
if (hbuf_size < length)
@@ -1030,14 +1062,10 @@ static ssize_t intel_sdvo_read_infoframe(struct intel_sdvo *intel_sdvo,
if (tx_rate == SDVO_HBUF_TX_DISABLED)
return 0;
- if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_HBUF_INFO,
- &hbuf_size, 1))
- return -ENXIO;
-
- /* Buffer size is 0 based, hooray! */
- hbuf_size++;
+ if (!intel_sdvo_get_hbuf_size(intel_sdvo, &hbuf_size))
+ return -ENXIO;
- DRM_DEBUG_KMS("reading sdvo hbuf: %i, hbuf_size %i, hbuf_size: %i\n",
+ DRM_DEBUG_KMS("reading sdvo hbuf: %i, length %u, hbuf_size: %i\n",
if_index, length, hbuf_size);
hbuf_size = min_t(unsigned int, length, hbuf_size);
@@ -1893,12 +1921,14 @@ static void intel_sdvo_enable_hotplug(struct intel_encoder *encoder)
&intel_sdvo->hotplug_active, 2);
}
-static bool intel_sdvo_hotplug(struct intel_encoder *encoder,
- struct intel_connector *connector)
+static enum intel_hotplug_state
+intel_sdvo_hotplug(struct intel_encoder *encoder,
+ struct intel_connector *connector,
+ bool irq_received)
{
intel_sdvo_enable_hotplug(encoder);
- return intel_encoder_hotplug(encoder, connector);
+ return intel_encoder_hotplug(encoder, connector, irq_received);
}
static bool
diff --git a/drivers/gpu/drm/i915/display/intel_sprite.c b/drivers/gpu/drm/i915/display/intel_sprite.c
index 004b52027ae8..dea63be1964f 100644
--- a/drivers/gpu/drm/i915/display/intel_sprite.c
+++ b/drivers/gpu/drm/i915/display/intel_sprite.c
@@ -40,8 +40,9 @@
#include <drm/i915_drm.h>
#include "i915_drv.h"
+#include "i915_trace.h"
#include "intel_atomic_plane.h"
-#include "intel_drv.h"
+#include "intel_display_types.h"
#include "intel_frontbuffer.h"
#include "intel_pm.h"
#include "intel_psr.h"
@@ -330,6 +331,12 @@ int intel_plane_check_src_coordinates(struct intel_plane_state *plane_state)
return 0;
}
+bool icl_is_hdr_plane(struct drm_i915_private *dev_priv, enum plane_id plane_id)
+{
+ return INTEL_GEN(dev_priv) >= 11 &&
+ icl_hdr_plane_mask() & BIT(plane_id);
+}
+
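/*
 * Editor's note -- this helper moves out of line from intel_sprite.h
 * (see that header's hunk near the end of this file); the logic is
 * unchanged: HDR planes exist only on gen11+ and only for the plane
 * ids covered by icl_hdr_plane_mask().
 */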
static unsigned int
skl_plane_max_stride(struct intel_plane *plane,
u32 pixel_format, u64 modifier,
@@ -441,9 +448,21 @@ icl_program_input_csc(struct intel_plane *plane,
*/
[DRM_COLOR_YCBCR_BT709] = {
0x7C98, 0x7800, 0x0,
- 0x9EF8, 0x7800, 0xABF8,
+ 0x9EF8, 0x7800, 0xAC00,
0x0, 0x7800, 0x7ED8,
},
+ /*
+ * BT.2020 full range YCbCr -> full range RGB
+ * The matrix required is :
+ * [1.000, 0.000, 1.474,
+ * 1.000, -0.1645, -0.5713,
+ * 1.000, 1.8814, 0.0000]
+ */
+ [DRM_COLOR_YCBCR_BT2020] = {
+ 0x7BC8, 0x7800, 0x0,
+ 0x8928, 0x7800, 0xAA88,
+ 0x0, 0x7800, 0x7F10,
+ },
};
/* Matrix for Limited Range to Full Range Conversion */
@@ -451,26 +470,38 @@ icl_program_input_csc(struct intel_plane *plane,
/*
* BT.601 Limited range YCbCr -> full range RGB
* The matrix required is :
- * [1.164384, 0.000, 1.596370,
- * 1.138393, -0.382500, -0.794598,
- * 1.138393, 1.971696, 0.0000]
+ * [1.164384, 0.000, 1.596027,
+ * 1.164384, -0.39175, -0.812813,
+ * 1.164384, 2.017232, 0.0000]
*/
[DRM_COLOR_YCBCR_BT601] = {
0x7CC8, 0x7950, 0x0,
- 0x8CB8, 0x7918, 0x9C40,
- 0x0, 0x7918, 0x7FC8,
+ 0x8D00, 0x7950, 0x9C88,
+ 0x0, 0x7950, 0x6810,
},
/*
* BT.709 Limited range YCbCr -> full range RGB
* The matrix required is :
- * [1.164, 0.000, 1.833671,
- * 1.138393, -0.213249, -0.532909,
- * 1.138393, 2.112402, 0.0000]
+ * [1.164384, 0.000, 1.792741,
+ * 1.164384, -0.213249, -0.532909,
+ * 1.164384, 2.112402, 0.0000]
*/
[DRM_COLOR_YCBCR_BT709] = {
- 0x7EA8, 0x7950, 0x0,
- 0x8888, 0x7918, 0xADA8,
- 0x0, 0x7918, 0x6870,
+ 0x7E58, 0x7950, 0x0,
+ 0x8888, 0x7950, 0xADA8,
+ 0x0, 0x7950, 0x6870,
+ },
+ /*
+ * BT.2020 Limited range YCbCr -> full range RGB
+ * The matrix required is :
+ * [1.164, 0.000, 1.678,
+ * 1.164, -0.1873, -0.6504,
+ * 1.164, 2.1417, 0.0000]
+ */
+ [DRM_COLOR_YCBCR_BT2020] = {
+ 0x7D70, 0x7950, 0x0,
+ 0x8A68, 0x7950, 0xAC00,
+ 0x0, 0x7950, 0x6890,
},
};
const u16 *csc;
@@ -492,8 +523,11 @@ icl_program_input_csc(struct intel_plane *plane,
I915_WRITE_FW(PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 0),
PREOFF_YUV_TO_RGB_HI);
- I915_WRITE_FW(PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 1),
- PREOFF_YUV_TO_RGB_ME);
+ if (plane_state->base.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
+ I915_WRITE_FW(PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 1), 0);
+ else
+ I915_WRITE_FW(PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 1),
+ PREOFF_YUV_TO_RGB_ME);
I915_WRITE_FW(PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 2),
PREOFF_YUV_TO_RGB_LO);
I915_WRITE_FW(PLANE_INPUT_CSC_POSTOFF(pipe, plane_id, 0), 0x0);
@@ -683,6 +717,16 @@ skl_plane_get_hw_state(struct intel_plane *plane,
return ret;
}
+static void i9xx_plane_linear_gamma(u16 gamma[8])
+{
+ /* The points are not evenly spaced. */
+ static const u8 in[8] = { 0, 1, 2, 4, 8, 16, 24, 32 };
+ int i;
+
+ for (i = 0; i < 8; i++)
+ gamma[i] = (in[i] << 8) / 32;
+}
+
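/*
 * Editor's note -- the resulting ramp, for reference: with
 * in[] = { 0, 1, 2, 4, 8, 16, 24, 32 } the loop above yields
 * gamma[] = { 0, 8, 16, 32, 64, 128, 192, 256 }. As the callers note,
 * the 0.0/1.0 end points are implicit, so only the six middle values
 * (all of which fit in the 8-bit register fields) get programmed.
 */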
static void
chv_update_csc(const struct intel_plane_state *plane_state)
{
@@ -858,6 +902,31 @@ static u32 vlv_sprite_ctl(const struct intel_crtc_state *crtc_state,
return sprctl;
}
+static void vlv_update_gamma(const struct intel_plane_state *plane_state)
+{
+ struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ const struct drm_framebuffer *fb = plane_state->base.fb;
+ enum pipe pipe = plane->pipe;
+ enum plane_id plane_id = plane->id;
+ u16 gamma[8];
+ int i;
+
+ /* RGB data seems to always bypass the gamma */
+ if (!fb->format->is_yuv)
+ return;
+
+ i9xx_plane_linear_gamma(gamma);
+
+ /* FIXME these registers are single buffered :( */
+ /* The two end points are implicit (0.0 and 1.0) */
+ for (i = 1; i < 8 - 1; i++)
+ I915_WRITE_FW(SPGAMC(pipe, plane_id, i - 1),
+ gamma[i] << 16 |
+ gamma[i] << 8 |
+ gamma[i]);
+}
+
static void
vlv_update_plane(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state,
@@ -916,6 +985,7 @@ vlv_update_plane(struct intel_plane *plane,
intel_plane_ggtt_offset(plane_state) + sprsurf_offset);
vlv_update_clrc(plane_state);
+ vlv_update_gamma(plane_state);
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
@@ -1013,6 +1083,8 @@ static u32 ivb_sprite_ctl(const struct intel_crtc_state *crtc_state,
return 0;
}
+ sprctl |= SPRITE_INT_GAMMA_DISABLE;
+
if (plane_state->base.color_encoding == DRM_COLOR_YCBCR_BT709)
sprctl |= SPRITE_YUV_TO_RGB_CSC_FORMAT_BT709;
@@ -1033,6 +1105,45 @@ static u32 ivb_sprite_ctl(const struct intel_crtc_state *crtc_state,
return sprctl;
}
+static void ivb_sprite_linear_gamma(u16 gamma[18])
+{
+ int i;
+
+ for (i = 0; i < 17; i++)
+ gamma[i] = (i << 10) / 16;
+
+ gamma[i] = 3 << 10;
+ i++;
+}
+
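/*
 * Editor's note -- worked values: (i << 10) / 16 is a linear ramp of
 * i * 64, so gamma[0..16] runs 0..1024 (1.0 in what appears to be a
 * 1.10 format) and the extra 18th entry is pinned at 3 << 10 == 3072
 * for the extended-range SPRGAMC16/SPRGAMC17 writes in the caller.
 */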
+static void ivb_update_gamma(const struct intel_plane_state *plane_state)
+{
+ struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ enum pipe pipe = plane->pipe;
+ u16 gamma[18];
+ int i;
+
+ ivb_sprite_linear_gamma(gamma);
+
+ /* FIXME these registers are single buffered :( */
+ for (i = 0; i < 16; i++)
+ I915_WRITE_FW(SPRGAMC(pipe, i),
+ gamma[i] << 20 |
+ gamma[i] << 10 |
+ gamma[i]);
+
+ I915_WRITE_FW(SPRGAMC16(pipe, 0), gamma[i]);
+ I915_WRITE_FW(SPRGAMC16(pipe, 1), gamma[i]);
+ I915_WRITE_FW(SPRGAMC16(pipe, 2), gamma[i]);
+ i++;
+
+ I915_WRITE_FW(SPRGAMC17(pipe, 0), gamma[i]);
+ I915_WRITE_FW(SPRGAMC17(pipe, 1), gamma[i]);
+ I915_WRITE_FW(SPRGAMC17(pipe, 2), gamma[i]);
+ i++;
+}
+
static void
ivb_update_plane(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state,
@@ -1099,6 +1210,8 @@ ivb_update_plane(struct intel_plane *plane,
I915_WRITE_FW(SPRSURF(pipe),
intel_plane_ggtt_offset(plane_state) + sprsurf_offset);
+ ivb_update_gamma(plane_state);
+
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
@@ -1224,6 +1337,66 @@ static u32 g4x_sprite_ctl(const struct intel_crtc_state *crtc_state,
return dvscntr;
}
+static void g4x_update_gamma(const struct intel_plane_state *plane_state)
+{
+ struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ const struct drm_framebuffer *fb = plane_state->base.fb;
+ enum pipe pipe = plane->pipe;
+ u16 gamma[8];
+ int i;
+
+ /* RGB data seems to always bypass the gamma */
+ if (!fb->format->is_yuv)
+ return;
+
+ i9xx_plane_linear_gamma(gamma);
+
+ /* FIXME these registers are single buffered :( */
+ /* The two end points are implicit (0.0 and 1.0) */
+ for (i = 1; i < 8 - 1; i++)
+ I915_WRITE_FW(DVSGAMC_G4X(pipe, i - 1),
+ gamma[i] << 16 |
+ gamma[i] << 8 |
+ gamma[i]);
+}
+
+static void ilk_sprite_linear_gamma(u16 gamma[17])
+{
+ int i;
+
+ for (i = 0; i < 17; i++)
+ gamma[i] = (i << 10) / 16;
+}
+
+static void ilk_update_gamma(const struct intel_plane_state *plane_state)
+{
+ struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ const struct drm_framebuffer *fb = plane_state->base.fb;
+ enum pipe pipe = plane->pipe;
+ u16 gamma[17];
+ int i;
+
+ /* RGB data seems to always bypass the gamma */
+ if (!fb->format->is_yuv)
+ return;
+
+ ilk_sprite_linear_gamma(gamma);
+
+ /* FIXME these registers are single buffered :( */
+ for (i = 0; i < 16; i++)
+ I915_WRITE_FW(DVSGAMC_ILK(pipe, i),
+ gamma[i] << 20 |
+ gamma[i] << 10 |
+ gamma[i]);
+
+ I915_WRITE_FW(DVSGAMCMAX_ILK(pipe, 0), gamma[i]);
+ I915_WRITE_FW(DVSGAMCMAX_ILK(pipe, 1), gamma[i]);
+ I915_WRITE_FW(DVSGAMCMAX_ILK(pipe, 2), gamma[i]);
+ i++;
+}
+
static void
g4x_update_plane(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state,
@@ -1283,6 +1456,11 @@ g4x_update_plane(struct intel_plane *plane,
I915_WRITE_FW(DVSSURF(pipe),
intel_plane_ggtt_offset(plane_state) + dvssurf_offset);
+ if (IS_G4X(dev_priv))
+ g4x_update_gamma(plane_state);
+ else
+ ilk_update_gamma(plane_state);
+
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
@@ -1347,7 +1525,7 @@ g4x_sprite_check_scaling(struct intel_crtc_state *crtc_state,
const struct drm_framebuffer *fb = plane_state->base.fb;
const struct drm_rect *src = &plane_state->base.src;
const struct drm_rect *dst = &plane_state->base.dst;
- int src_x, src_y, src_w, src_h, crtc_w, crtc_h;
+ int src_x, src_w, src_h, crtc_w, crtc_h;
const struct drm_display_mode *adjusted_mode =
&crtc_state->base.adjusted_mode;
unsigned int cpp = fb->format->cpp[0];
@@ -1358,7 +1536,6 @@ g4x_sprite_check_scaling(struct intel_crtc_state *crtc_state,
crtc_h = drm_rect_height(dst);
src_x = src->x1 >> 16;
- src_y = src->y1 >> 16;
src_w = drm_rect_width(src) >> 16;
src_h = drm_rect_height(src) >> 16;
@@ -1852,28 +2029,7 @@ static const u32 skl_plane_formats[] = {
DRM_FORMAT_VYUY,
};
-static const u32 icl_plane_formats[] = {
- DRM_FORMAT_C8,
- DRM_FORMAT_RGB565,
- DRM_FORMAT_XRGB8888,
- DRM_FORMAT_XBGR8888,
- DRM_FORMAT_ARGB8888,
- DRM_FORMAT_ABGR8888,
- DRM_FORMAT_XRGB2101010,
- DRM_FORMAT_XBGR2101010,
- DRM_FORMAT_YUYV,
- DRM_FORMAT_YVYU,
- DRM_FORMAT_UYVY,
- DRM_FORMAT_VYUY,
- DRM_FORMAT_Y210,
- DRM_FORMAT_Y212,
- DRM_FORMAT_Y216,
- DRM_FORMAT_XVYU2101010,
- DRM_FORMAT_XVYU12_16161616,
- DRM_FORMAT_XVYU16161616,
-};
-
-static const u32 icl_hdr_plane_formats[] = {
+static const u32 skl_planar_formats[] = {
DRM_FORMAT_C8,
DRM_FORMAT_RGB565,
DRM_FORMAT_XRGB8888,
@@ -1882,23 +2038,14 @@ static const u32 icl_hdr_plane_formats[] = {
DRM_FORMAT_ABGR8888,
DRM_FORMAT_XRGB2101010,
DRM_FORMAT_XBGR2101010,
- DRM_FORMAT_XRGB16161616F,
- DRM_FORMAT_XBGR16161616F,
- DRM_FORMAT_ARGB16161616F,
- DRM_FORMAT_ABGR16161616F,
DRM_FORMAT_YUYV,
DRM_FORMAT_YVYU,
DRM_FORMAT_UYVY,
DRM_FORMAT_VYUY,
- DRM_FORMAT_Y210,
- DRM_FORMAT_Y212,
- DRM_FORMAT_Y216,
- DRM_FORMAT_XVYU2101010,
- DRM_FORMAT_XVYU12_16161616,
- DRM_FORMAT_XVYU16161616,
+ DRM_FORMAT_NV12,
};
-static const u32 skl_planar_formats[] = {
+static const u32 glk_planar_formats[] = {
DRM_FORMAT_C8,
DRM_FORMAT_RGB565,
DRM_FORMAT_XRGB8888,
@@ -1912,9 +2059,12 @@ static const u32 skl_planar_formats[] = {
DRM_FORMAT_UYVY,
DRM_FORMAT_VYUY,
DRM_FORMAT_NV12,
+ DRM_FORMAT_P010,
+ DRM_FORMAT_P012,
+ DRM_FORMAT_P016,
};
-static const u32 glk_planar_formats[] = {
+static const u32 icl_sdr_y_plane_formats[] = {
DRM_FORMAT_C8,
DRM_FORMAT_RGB565,
DRM_FORMAT_XRGB8888,
@@ -1927,13 +2077,15 @@ static const u32 glk_planar_formats[] = {
DRM_FORMAT_YVYU,
DRM_FORMAT_UYVY,
DRM_FORMAT_VYUY,
- DRM_FORMAT_NV12,
- DRM_FORMAT_P010,
- DRM_FORMAT_P012,
- DRM_FORMAT_P016,
+ DRM_FORMAT_Y210,
+ DRM_FORMAT_Y212,
+ DRM_FORMAT_Y216,
+ DRM_FORMAT_XVYU2101010,
+ DRM_FORMAT_XVYU12_16161616,
+ DRM_FORMAT_XVYU16161616,
};
-static const u32 icl_planar_formats[] = {
+static const u32 icl_sdr_uv_plane_formats[] = {
DRM_FORMAT_C8,
DRM_FORMAT_RGB565,
DRM_FORMAT_XRGB8888,
@@ -1958,7 +2110,7 @@ static const u32 icl_planar_formats[] = {
DRM_FORMAT_XVYU16161616,
};
-static const u32 icl_hdr_planar_formats[] = {
+static const u32 icl_hdr_plane_formats[] = {
DRM_FORMAT_C8,
DRM_FORMAT_RGB565,
DRM_FORMAT_XRGB8888,
@@ -2201,9 +2353,6 @@ static bool skl_plane_has_fbc(struct drm_i915_private *dev_priv,
static bool skl_plane_has_planar(struct drm_i915_private *dev_priv,
enum pipe pipe, enum plane_id plane_id)
{
- if (INTEL_GEN(dev_priv) >= 11)
- return plane_id <= PLANE_SPRITE3;
-
/* Display WA #0870: skl, bxt */
if (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv))
return false;
@@ -2217,6 +2366,48 @@ static bool skl_plane_has_planar(struct drm_i915_private *dev_priv,
return true;
}
+static const u32 *skl_get_plane_formats(struct drm_i915_private *dev_priv,
+ enum pipe pipe, enum plane_id plane_id,
+ int *num_formats)
+{
+ if (skl_plane_has_planar(dev_priv, pipe, plane_id)) {
+ *num_formats = ARRAY_SIZE(skl_planar_formats);
+ return skl_planar_formats;
+ } else {
+ *num_formats = ARRAY_SIZE(skl_plane_formats);
+ return skl_plane_formats;
+ }
+}
+
+static const u32 *glk_get_plane_formats(struct drm_i915_private *dev_priv,
+ enum pipe pipe, enum plane_id plane_id,
+ int *num_formats)
+{
+ if (skl_plane_has_planar(dev_priv, pipe, plane_id)) {
+ *num_formats = ARRAY_SIZE(glk_planar_formats);
+ return glk_planar_formats;
+ } else {
+ *num_formats = ARRAY_SIZE(skl_plane_formats);
+ return skl_plane_formats;
+ }
+}
+
+static const u32 *icl_get_plane_formats(struct drm_i915_private *dev_priv,
+ enum pipe pipe, enum plane_id plane_id,
+ int *num_formats)
+{
+ if (icl_is_hdr_plane(dev_priv, plane_id)) {
+ *num_formats = ARRAY_SIZE(icl_hdr_plane_formats);
+ return icl_hdr_plane_formats;
+ } else if (icl_is_nv12_y_plane(plane_id)) {
+ *num_formats = ARRAY_SIZE(icl_sdr_y_plane_formats);
+ return icl_sdr_y_plane_formats;
+ } else {
+ *num_formats = ARRAY_SIZE(icl_sdr_uv_plane_formats);
+ return icl_sdr_uv_plane_formats;
+ }
+}
+
static bool skl_plane_has_ccs(struct drm_i915_private *dev_priv,
enum pipe pipe, enum plane_id plane_id)
{
@@ -2270,30 +2461,15 @@ skl_universal_plane_create(struct drm_i915_private *dev_priv,
if (icl_is_nv12_y_plane(plane_id))
plane->update_slave = icl_update_slave;
- if (skl_plane_has_planar(dev_priv, pipe, plane_id)) {
- if (icl_is_hdr_plane(dev_priv, plane_id)) {
- formats = icl_hdr_planar_formats;
- num_formats = ARRAY_SIZE(icl_hdr_planar_formats);
- } else if (INTEL_GEN(dev_priv) >= 11) {
- formats = icl_planar_formats;
- num_formats = ARRAY_SIZE(icl_planar_formats);
- } else if (INTEL_GEN(dev_priv) == 10 || IS_GEMINILAKE(dev_priv)) {
- formats = glk_planar_formats;
- num_formats = ARRAY_SIZE(glk_planar_formats);
- } else {
- formats = skl_planar_formats;
- num_formats = ARRAY_SIZE(skl_planar_formats);
- }
- } else if (icl_is_hdr_plane(dev_priv, plane_id)) {
- formats = icl_hdr_plane_formats;
- num_formats = ARRAY_SIZE(icl_hdr_plane_formats);
- } else if (INTEL_GEN(dev_priv) >= 11) {
- formats = icl_plane_formats;
- num_formats = ARRAY_SIZE(icl_plane_formats);
- } else {
- formats = skl_plane_formats;
- num_formats = ARRAY_SIZE(skl_plane_formats);
- }
+ if (INTEL_GEN(dev_priv) >= 11)
+ formats = icl_get_plane_formats(dev_priv, pipe,
+ plane_id, &num_formats);
+ else if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
+ formats = glk_get_plane_formats(dev_priv, pipe,
+ plane_id, &num_formats);
+ else
+ formats = skl_get_plane_formats(dev_priv, pipe,
+ plane_id, &num_formats);
plane->has_ccs = skl_plane_has_ccs(dev_priv, pipe, plane_id);
if (plane->has_ccs)
diff --git a/drivers/gpu/drm/i915/display/intel_sprite.h b/drivers/gpu/drm/i915/display/intel_sprite.h
index 500f6bffb139..093a2d156f1e 100644
--- a/drivers/gpu/drm/i915/display/intel_sprite.h
+++ b/drivers/gpu/drm/i915/display/intel_sprite.h
@@ -8,7 +8,6 @@
#include <linux/types.h>
-#include "i915_drv.h"
#include "intel_display.h"
struct drm_device;
@@ -49,11 +48,6 @@ static inline u8 icl_hdr_plane_mask(void)
BIT(PLANE_SPRITE0) | BIT(PLANE_SPRITE1);
}
-static inline bool icl_is_hdr_plane(struct drm_i915_private *dev_priv,
- enum plane_id plane_id)
-{
- return INTEL_GEN(dev_priv) >= 11 &&
- icl_hdr_plane_mask() & BIT(plane_id);
-}
+bool icl_is_hdr_plane(struct drm_i915_private *dev_priv, enum plane_id plane_id);
#endif /* __INTEL_SPRITE_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_tc.c b/drivers/gpu/drm/i915/display/intel_tc.c
new file mode 100644
index 000000000000..85743a43bee2
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_tc.c
@@ -0,0 +1,544 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include "i915_drv.h"
+#include "intel_display.h"
+#include "intel_display_types.h"
+#include "intel_dp_mst.h"
+#include "intel_tc.h"
+
+static const char *tc_port_mode_name(enum tc_port_mode mode)
+{
+ static const char * const names[] = {
+ [TC_PORT_TBT_ALT] = "tbt-alt",
+ [TC_PORT_DP_ALT] = "dp-alt",
+ [TC_PORT_LEGACY] = "legacy",
+ };
+
+ if (WARN_ON(mode >= ARRAY_SIZE(names)))
+ mode = TC_PORT_TBT_ALT;
+
+ return names[mode];
+}
+
+static bool has_modular_fia(struct drm_i915_private *i915)
+{
+ if (!INTEL_INFO(i915)->display.has_modular_fia)
+ return false;
+
+ return intel_uncore_read(&i915->uncore,
+ PORT_TX_DFLEXDPSP(FIA1)) & MODULAR_FIA_MASK;
+}
+
+static enum phy_fia tc_port_to_fia(struct drm_i915_private *i915,
+ enum tc_port tc_port)
+{
+ if (!has_modular_fia(i915))
+ return FIA1;
+
+ /*
+ * Each Modular FIA instance houses 2 TC ports. In SOCs that have more
+ * than two TC ports, there are multiple instances of Modular FIA.
+ */
+ return tc_port / 2;
+}
+
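/*
 * Editor's note -- worked example, assuming FIA1 == 0 in enum phy_fia:
 * with a modular FIA, TC ports 0/1 land on the first instance and TC
 * ports 2/3 on the second; without one, every TC port stays on FIA1.
 */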
+u32 intel_tc_port_get_lane_mask(struct intel_digital_port *dig_port)
+{
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ enum tc_port tc_port = intel_port_to_tc(i915, dig_port->base.port);
+ struct intel_uncore *uncore = &i915->uncore;
+ u32 lane_mask;
+
+ lane_mask = intel_uncore_read(uncore,
+ PORT_TX_DFLEXDPSP(dig_port->tc_phy_fia));
+
+ WARN_ON(lane_mask == 0xffffffff);
+
+ return (lane_mask & DP_LANE_ASSIGNMENT_MASK(tc_port)) >>
+ DP_LANE_ASSIGNMENT_SHIFT(tc_port);
+}
+
+int intel_tc_port_fia_max_lane_count(struct intel_digital_port *dig_port)
+{
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ intel_wakeref_t wakeref;
+ u32 lane_mask;
+
+ if (dig_port->tc_mode != TC_PORT_DP_ALT)
+ return 4;
+
+ lane_mask = 0;
+ with_intel_display_power(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref)
+ lane_mask = intel_tc_port_get_lane_mask(dig_port);
+
+ switch (lane_mask) {
+ default:
+ MISSING_CASE(lane_mask);
+ /* fall-through */
+ case 0x1:
+ case 0x2:
+ case 0x4:
+ case 0x8:
+ return 1;
+ case 0x3:
+ case 0xc:
+ return 2;
+ case 0xf:
+ return 4;
+ }
+}
+
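/*
 * Editor's note -- the lane mask has one bit per lane, so 0x3
 * (lanes 0+1) and 0xc (lanes 2+3) each give 2 usable lanes, 0xf gives
 * all 4, any single bit gives 1, and unexpected combinations warn via
 * MISSING_CASE() and fall through to the 1-lane case.
 */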
+void intel_tc_port_set_fia_lane_count(struct intel_digital_port *dig_port,
+ int required_lanes)
+{
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ enum tc_port tc_port = intel_port_to_tc(i915, dig_port->base.port);
+ bool lane_reversal = dig_port->saved_port_bits & DDI_BUF_PORT_REVERSAL;
+ struct intel_uncore *uncore = &i915->uncore;
+ u32 val;
+
+ WARN_ON(lane_reversal && dig_port->tc_mode != TC_PORT_LEGACY);
+
+ val = intel_uncore_read(uncore,
+ PORT_TX_DFLEXDPMLE1(dig_port->tc_phy_fia));
+ val &= ~DFLEXDPMLE1_DPMLETC_MASK(tc_port);
+
+ switch (required_lanes) {
+ case 1:
+ val |= lane_reversal ? DFLEXDPMLE1_DPMLETC_ML3(tc_port) :
+ DFLEXDPMLE1_DPMLETC_ML0(tc_port);
+ break;
+ case 2:
+ val |= lane_reversal ? DFLEXDPMLE1_DPMLETC_ML3_2(tc_port) :
+ DFLEXDPMLE1_DPMLETC_ML1_0(tc_port);
+ break;
+ case 4:
+ val |= DFLEXDPMLE1_DPMLETC_ML3_0(tc_port);
+ break;
+ default:
+ MISSING_CASE(required_lanes);
+ }
+
+ intel_uncore_write(uncore,
+ PORT_TX_DFLEXDPMLE1(dig_port->tc_phy_fia), val);
+}
+
+static void tc_port_fixup_legacy_flag(struct intel_digital_port *dig_port,
+ u32 live_status_mask)
+{
+ u32 valid_hpd_mask;
+
+ if (dig_port->tc_legacy_port)
+ valid_hpd_mask = BIT(TC_PORT_LEGACY);
+ else
+ valid_hpd_mask = BIT(TC_PORT_DP_ALT) |
+ BIT(TC_PORT_TBT_ALT);
+
+ if (!(live_status_mask & ~valid_hpd_mask))
+ return;
+
+ /* If live status mismatches the VBT flag, trust the live status. */
+ DRM_ERROR("Port %s: live status %08x mismatches the legacy port flag, fix flag\n",
+ dig_port->tc_port_name, live_status_mask);
+
+ dig_port->tc_legacy_port = !dig_port->tc_legacy_port;
+}
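A standalone sketch of the consistency check above: a port flagged legacy in the VBT should only ever report the legacy HPD bit live, and a non-legacy port only the DP-alt/TBT-alt bits. The enum values mirror the driver's:

#include <stdio.h>

enum { TC_PORT_TBT_ALT, TC_PORT_DP_ALT, TC_PORT_LEGACY };
#define BIT(n) (1u << (n))

int main(void)
{
	unsigned int valid_hpd_mask = BIT(TC_PORT_LEGACY);    /* tc_legacy_port set */
	unsigned int live_status_mask = BIT(TC_PORT_DP_ALT);  /* hardware disagrees */

	if (live_status_mask & ~valid_hpd_mask)
		printf("mismatch: trust the hardware, flip tc_legacy_port\n");
	else
		printf("live status consistent with the VBT flag\n");
	return 0;
}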
+
+static u32 tc_port_live_status_mask(struct intel_digital_port *dig_port)
+{
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ enum tc_port tc_port = intel_port_to_tc(i915, dig_port->base.port);
+ struct intel_uncore *uncore = &i915->uncore;
+ u32 mask = 0;
+ u32 val;
+
+ val = intel_uncore_read(uncore,
+ PORT_TX_DFLEXDPSP(dig_port->tc_phy_fia));
+
+ if (val == 0xffffffff) {
+ DRM_DEBUG_KMS("Port %s: PHY in TCCOLD, nothing connected\n",
+ dig_port->tc_port_name);
+ return mask;
+ }
+
+ if (val & TC_LIVE_STATE_TBT(tc_port))
+ mask |= BIT(TC_PORT_TBT_ALT);
+ if (val & TC_LIVE_STATE_TC(tc_port))
+ mask |= BIT(TC_PORT_DP_ALT);
+
+ if (intel_uncore_read(uncore, SDEISR) & SDE_TC_HOTPLUG_ICP(tc_port))
+ mask |= BIT(TC_PORT_LEGACY);
+
+ /* The sink can be connected only in a single mode. */
+ if (!WARN_ON(hweight32(mask) > 1))
+ tc_port_fixup_legacy_flag(dig_port, mask);
+
+ return mask;
+}
+
+static bool icl_tc_phy_status_complete(struct intel_digital_port *dig_port)
+{
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ enum tc_port tc_port = intel_port_to_tc(i915, dig_port->base.port);
+ struct intel_uncore *uncore = &i915->uncore;
+ u32 val;
+
+ val = intel_uncore_read(uncore,
+ PORT_TX_DFLEXDPPMS(dig_port->tc_phy_fia));
+ if (val == 0xffffffff) {
+ DRM_DEBUG_KMS("Port %s: PHY in TCCOLD, assuming not complete\n",
+ dig_port->tc_port_name);
+ return false;
+ }
+
+ return val & DP_PHY_MODE_STATUS_COMPLETED(tc_port);
+}
+
+static bool icl_tc_phy_set_safe_mode(struct intel_digital_port *dig_port,
+ bool enable)
+{
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ enum tc_port tc_port = intel_port_to_tc(i915, dig_port->base.port);
+ struct intel_uncore *uncore = &i915->uncore;
+ u32 val;
+
+ val = intel_uncore_read(uncore,
+ PORT_TX_DFLEXDPCSSS(dig_port->tc_phy_fia));
+ if (val == 0xffffffff) {
+ DRM_DEBUG_KMS("Port %s: PHY in TCCOLD, can't set safe-mode to %s\n",
+ dig_port->tc_port_name,
+ enableddisabled(enable));
+
+ return false;
+ }
+
+ val &= ~DP_PHY_MODE_STATUS_NOT_SAFE(tc_port);
+ if (!enable)
+ val |= DP_PHY_MODE_STATUS_NOT_SAFE(tc_port);
+
+ intel_uncore_write(uncore,
+ PORT_TX_DFLEXDPCSSS(dig_port->tc_phy_fia), val);
+
+ if (enable && wait_for(!icl_tc_phy_status_complete(dig_port), 10))
+ DRM_DEBUG_KMS("Port %s: PHY complete clear timed out\n",
+ dig_port->tc_port_name);
+
+ return true;
+}
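The polarity above is easy to misread: the hardware bit is NOT_SAFE, so enabling safe mode means clearing it. A standalone sketch with an illustrative bit position, not the real register layout:

#include <stdio.h>

#define DP_PHY_MODE_STATUS_NOT_SAFE 0x1 /* illustrative bit only */

static unsigned int apply_safe_mode(unsigned int val, int enable)
{
	val &= ~DP_PHY_MODE_STATUS_NOT_SAFE;
	if (!enable)
		val |= DP_PHY_MODE_STATUS_NOT_SAFE;
	return val;
}

int main(void)
{
	printf("enable safe mode : NOT_SAFE=%u\n",
	       apply_safe_mode(0xff, 1) & DP_PHY_MODE_STATUS_NOT_SAFE);
	printf("disable safe mode: NOT_SAFE=%u\n",
	       apply_safe_mode(0x00, 0) & DP_PHY_MODE_STATUS_NOT_SAFE);
	return 0;
}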
+
+static bool icl_tc_phy_is_in_safe_mode(struct intel_digital_port *dig_port)
+{
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ enum tc_port tc_port = intel_port_to_tc(i915, dig_port->base.port);
+ struct intel_uncore *uncore = &i915->uncore;
+ u32 val;
+
+ val = intel_uncore_read(uncore,
+ PORT_TX_DFLEXDPCSSS(dig_port->tc_phy_fia));
+ if (val == 0xffffffff) {
+ DRM_DEBUG_KMS("Port %s: PHY in TCCOLD, assume safe mode\n",
+ dig_port->tc_port_name);
+ return true;
+ }
+
+ return !(val & DP_PHY_MODE_STATUS_NOT_SAFE(tc_port));
+}
+
+/*
+ * This function implements the first part of the Connect Flow described by our
+ * specification, Gen11 TypeC Programming chapter. The rest of the flow (reading
+ * lanes, EDID, etc) is done as needed in the typical places.
+ *
+ * Unlike the other ports, type-C ports are not available to use as soon as we
+ * get a hotplug. The type-C PHYs can be shared between multiple controllers:
+ * display, USB, etc. As a result, handshaking through FIA is required around
+ * connect and disconnect to cleanly transfer PHY ownership to and from the
+ * display controller, and to set the type-C power state.
+ */
+static void icl_tc_phy_connect(struct intel_digital_port *dig_port,
+ int required_lanes)
+{
+ int max_lanes;
+
+ if (!icl_tc_phy_status_complete(dig_port)) {
+ DRM_DEBUG_KMS("Port %s: PHY not ready\n",
+ dig_port->tc_port_name);
+ goto out_set_tbt_alt_mode;
+ }
+
+ if (!icl_tc_phy_set_safe_mode(dig_port, false) &&
+ !WARN_ON(dig_port->tc_legacy_port))
+ goto out_set_tbt_alt_mode;
+
+ max_lanes = intel_tc_port_fia_max_lane_count(dig_port);
+ if (dig_port->tc_legacy_port) {
+ WARN_ON(max_lanes != 4);
+ dig_port->tc_mode = TC_PORT_LEGACY;
+
+ return;
+ }
+
+ /*
+ * Now we have to re-check the live state, in case the port recently
+ * became disconnected. Not necessary for legacy mode.
+ */
+ if (!(tc_port_live_status_mask(dig_port) & BIT(TC_PORT_DP_ALT))) {
+ DRM_DEBUG_KMS("Port %s: PHY sudden disconnect\n",
+ dig_port->tc_port_name);
+ goto out_set_safe_mode;
+ }
+
+ if (max_lanes < required_lanes) {
+ DRM_DEBUG_KMS("Port %s: PHY max lanes %d < required lanes %d\n",
+ dig_port->tc_port_name,
+ max_lanes, required_lanes);
+ goto out_set_safe_mode;
+ }
+
+ dig_port->tc_mode = TC_PORT_DP_ALT;
+
+ return;
+
+out_set_safe_mode:
+ icl_tc_phy_set_safe_mode(dig_port, true);
+out_set_tbt_alt_mode:
+ dig_port->tc_mode = TC_PORT_TBT_ALT;
+}
+
+/*
+ * See the comment at the connect function. This implements the Disconnect
+ * Flow.
+ */
+static void icl_tc_phy_disconnect(struct intel_digital_port *dig_port)
+{
+ switch (dig_port->tc_mode) {
+ case TC_PORT_LEGACY:
+ /* Nothing to do, we never disconnect from legacy mode */
+ break;
+ case TC_PORT_DP_ALT:
+ icl_tc_phy_set_safe_mode(dig_port, true);
+ dig_port->tc_mode = TC_PORT_TBT_ALT;
+ break;
+ case TC_PORT_TBT_ALT:
+ /* Nothing to do, we stay in TBT-alt mode */
+ break;
+ default:
+ MISSING_CASE(dig_port->tc_mode);
+ }
+}
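The disconnect transitions above reduce to a one-liner: legacy never disconnects, dp-alt falls back to tbt-alt, and tbt-alt stays put. A standalone sketch:

#include <stdio.h>

enum tc_port_mode { TC_PORT_TBT_ALT, TC_PORT_DP_ALT, TC_PORT_LEGACY };

static enum tc_port_mode disconnect(enum tc_port_mode mode)
{
	return mode == TC_PORT_DP_ALT ? TC_PORT_TBT_ALT : mode;
}

int main(void)
{
	static const char * const names[] = { "tbt-alt", "dp-alt", "legacy" };

	for (int m = TC_PORT_TBT_ALT; m <= TC_PORT_LEGACY; m++)
		printf("%s -> %s\n", names[m], names[disconnect(m)]);
	return 0;
}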
+
+static bool icl_tc_phy_is_connected(struct intel_digital_port *dig_port)
+{
+ if (!icl_tc_phy_status_complete(dig_port)) {
+ DRM_DEBUG_KMS("Port %s: PHY status not complete\n",
+ dig_port->tc_port_name);
+ return dig_port->tc_mode == TC_PORT_TBT_ALT;
+ }
+
+ if (icl_tc_phy_is_in_safe_mode(dig_port)) {
+ DRM_DEBUG_KMS("Port %s: PHY still in safe mode\n",
+ dig_port->tc_port_name);
+
+ return false;
+ }
+
+ return dig_port->tc_mode == TC_PORT_DP_ALT ||
+ dig_port->tc_mode == TC_PORT_LEGACY;
+}
+
+static enum tc_port_mode
+intel_tc_port_get_current_mode(struct intel_digital_port *dig_port)
+{
+ u32 live_status_mask = tc_port_live_status_mask(dig_port);
+ bool in_safe_mode = icl_tc_phy_is_in_safe_mode(dig_port);
+ enum tc_port_mode mode;
+
+ if (in_safe_mode || WARN_ON(!icl_tc_phy_status_complete(dig_port)))
+ return TC_PORT_TBT_ALT;
+
+ mode = dig_port->tc_legacy_port ? TC_PORT_LEGACY : TC_PORT_DP_ALT;
+ if (live_status_mask) {
+ enum tc_port_mode live_mode = fls(live_status_mask) - 1;
+
+ if (!WARN_ON(live_mode == TC_PORT_TBT_ALT))
+ mode = live_mode;
+ }
+
+ return mode;
+}
+
+static enum tc_port_mode
+intel_tc_port_get_target_mode(struct intel_digital_port *dig_port)
+{
+ u32 live_status_mask = tc_port_live_status_mask(dig_port);
+
+ if (live_status_mask)
+ return fls(live_status_mask) - 1;
+
+ return icl_tc_phy_status_complete(dig_port) &&
+ dig_port->tc_legacy_port ? TC_PORT_LEGACY :
+ TC_PORT_TBT_ALT;
+}
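Both functions above use fls(live_status_mask) - 1 to pick a mode. That works because the enum is ordered tbt-alt < dp-alt < legacy, so the highest set bit is also the highest-priority mode. A standalone sketch, with fls() open-coded since it is a kernel helper:

#include <stdio.h>

enum tc_port_mode { TC_PORT_TBT_ALT, TC_PORT_DP_ALT, TC_PORT_LEGACY };
#define BIT(n) (1u << (n))

/* kernel fls(): position of the most significant set bit, 1-based */
static int fls_u32(unsigned int x)
{
	return x ? 32 - __builtin_clz(x) : 0;
}

int main(void)
{
	static const char * const names[] = { "tbt-alt", "dp-alt", "legacy" };
	static const unsigned int masks[] = {
		BIT(TC_PORT_TBT_ALT),
		BIT(TC_PORT_DP_ALT),
		BIT(TC_PORT_LEGACY),
	};

	for (unsigned int i = 0; i < 3; i++)
		printf("live mask 0x%x -> %s\n",
		       masks[i], names[fls_u32(masks[i]) - 1]);
	return 0;
}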
+
+static void intel_tc_port_reset_mode(struct intel_digital_port *dig_port,
+ int required_lanes)
+{
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ enum tc_port_mode old_tc_mode = dig_port->tc_mode;
+
+ intel_display_power_flush_work(i915);
+ WARN_ON(intel_display_power_is_enabled(i915,
+ intel_aux_power_domain(dig_port)));
+
+ icl_tc_phy_disconnect(dig_port);
+ icl_tc_phy_connect(dig_port, required_lanes);
+
+ DRM_DEBUG_KMS("Port %s: TC port mode reset (%s -> %s)\n",
+ dig_port->tc_port_name,
+ tc_port_mode_name(old_tc_mode),
+ tc_port_mode_name(dig_port->tc_mode));
+}
+
+static void
+intel_tc_port_link_init_refcount(struct intel_digital_port *dig_port,
+ int refcount)
+{
+ WARN_ON(dig_port->tc_link_refcount);
+ dig_port->tc_link_refcount = refcount;
+}
+
+void intel_tc_port_sanitize(struct intel_digital_port *dig_port)
+{
+ struct intel_encoder *encoder = &dig_port->base;
+ int active_links = 0;
+
+ mutex_lock(&dig_port->tc_lock);
+
+ dig_port->tc_mode = intel_tc_port_get_current_mode(dig_port);
+ if (dig_port->dp.is_mst)
+ active_links = intel_dp_mst_encoder_active_links(dig_port);
+ else if (encoder->base.crtc)
+ active_links = to_intel_crtc(encoder->base.crtc)->active;
+
+ if (active_links) {
+ if (!icl_tc_phy_is_connected(dig_port))
+ DRM_DEBUG_KMS("Port %s: PHY disconnected with %d active link(s)\n",
+ dig_port->tc_port_name, active_links);
+ intel_tc_port_link_init_refcount(dig_port, active_links);
+
+ goto out;
+ }
+
+ if (dig_port->tc_legacy_port)
+ icl_tc_phy_connect(dig_port, 1);
+
+out:
+ DRM_DEBUG_KMS("Port %s: sanitize mode (%s)\n",
+ dig_port->tc_port_name,
+ tc_port_mode_name(dig_port->tc_mode));
+
+ mutex_unlock(&dig_port->tc_lock);
+}
+
+static bool intel_tc_port_needs_reset(struct intel_digital_port *dig_port)
+{
+ return intel_tc_port_get_target_mode(dig_port) != dig_port->tc_mode;
+}
+
+/*
+ * The type-C ports are different because even when they are connected, they may
+ * not be available/usable by the graphics driver: see the comment on
+ * icl_tc_phy_connect(). So in our driver, instead of adding the additional
+ * concept of "usable" and making everything check for "connected and usable",
+ * we define a port as "connected" when it is not only connected but also
+ * usable by the rest of the driver. That maintains the old assumption that
+ * connected ports are usable, and avoids exposing to the users objects they
+ * can't really use.
+ */
+bool intel_tc_port_connected(struct intel_digital_port *dig_port)
+{
+ bool is_connected;
+
+ intel_tc_port_lock(dig_port);
+ is_connected = tc_port_live_status_mask(dig_port) &
+ BIT(dig_port->tc_mode);
+ intel_tc_port_unlock(dig_port);
+
+ return is_connected;
+}
+
+static void __intel_tc_port_lock(struct intel_digital_port *dig_port,
+ int required_lanes)
+{
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ intel_wakeref_t wakeref;
+
+ wakeref = intel_display_power_get(i915, POWER_DOMAIN_DISPLAY_CORE);
+
+ mutex_lock(&dig_port->tc_lock);
+
+ if (!dig_port->tc_link_refcount &&
+ intel_tc_port_needs_reset(dig_port))
+ intel_tc_port_reset_mode(dig_port, required_lanes);
+
+ WARN_ON(dig_port->tc_lock_wakeref);
+ dig_port->tc_lock_wakeref = wakeref;
+}
+
+void intel_tc_port_lock(struct intel_digital_port *dig_port)
+{
+ __intel_tc_port_lock(dig_port, 1);
+}
+
+void intel_tc_port_unlock(struct intel_digital_port *dig_port)
+{
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ intel_wakeref_t wakeref = fetch_and_zero(&dig_port->tc_lock_wakeref);
+
+ mutex_unlock(&dig_port->tc_lock);
+
+ intel_display_power_put_async(i915, POWER_DOMAIN_DISPLAY_CORE,
+ wakeref);
+}
+
+bool intel_tc_port_ref_held(struct intel_digital_port *dig_port)
+{
+ return mutex_is_locked(&dig_port->tc_lock) ||
+ dig_port->tc_link_refcount;
+}
+
+void intel_tc_port_get_link(struct intel_digital_port *dig_port,
+ int required_lanes)
+{
+ __intel_tc_port_lock(dig_port, required_lanes);
+ dig_port->tc_link_refcount++;
+ intel_tc_port_unlock(dig_port);
+}
+
+void intel_tc_port_put_link(struct intel_digital_port *dig_port)
+{
+ mutex_lock(&dig_port->tc_lock);
+ dig_port->tc_link_refcount--;
+ mutex_unlock(&dig_port->tc_lock);
+}
+
+void intel_tc_port_init(struct intel_digital_port *dig_port, bool is_legacy)
+{
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ enum port port = dig_port->base.port;
+ enum tc_port tc_port = intel_port_to_tc(i915, port);
+
+ if (WARN_ON(tc_port == PORT_TC_NONE))
+ return;
+
+ snprintf(dig_port->tc_port_name, sizeof(dig_port->tc_port_name),
+ "%c/TC#%d", port_name(port), tc_port + 1);
+
+ mutex_init(&dig_port->tc_lock);
+ dig_port->tc_legacy_port = is_legacy;
+ dig_port->tc_link_refcount = 0;
+ dig_port->tc_phy_fia = tc_port_to_fia(i915, tc_port);
+}
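Taken together, the expected call pattern for this API looks roughly as follows. A kernel-style sketch assembled from the functions above, not a verbatim caller from the tree:

/* At encoder init time: */
intel_tc_port_init(dig_port, is_legacy);

/* While a link is up, pin the current mode so it cannot be reset: */
intel_tc_port_get_link(dig_port, required_lanes);
/* ... modeset / enable sequence ... */
intel_tc_port_put_link(dig_port);

/* For short-term register access; a mode reset may happen on lock: */
intel_tc_port_lock(dig_port);
/* ... read FIA state, query lane assignment, etc. ... */
intel_tc_port_unlock(dig_port);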
diff --git a/drivers/gpu/drm/i915/display/intel_tc.h b/drivers/gpu/drm/i915/display/intel_tc.h
new file mode 100644
index 000000000000..783d75531435
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_tc.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_TC_H__
+#define __INTEL_TC_H__
+
+#include <linux/mutex.h>
+#include <linux/types.h>
+
+struct intel_digital_port;
+
+bool intel_tc_port_connected(struct intel_digital_port *dig_port);
+u32 intel_tc_port_get_lane_mask(struct intel_digital_port *dig_port);
+int intel_tc_port_fia_max_lane_count(struct intel_digital_port *dig_port);
+void intel_tc_port_set_fia_lane_count(struct intel_digital_port *dig_port,
+ int required_lanes);
+
+void intel_tc_port_sanitize(struct intel_digital_port *dig_port);
+void intel_tc_port_lock(struct intel_digital_port *dig_port);
+void intel_tc_port_unlock(struct intel_digital_port *dig_port);
+void intel_tc_port_get_link(struct intel_digital_port *dig_port,
+ int required_lanes);
+void intel_tc_port_put_link(struct intel_digital_port *dig_port);
+bool intel_tc_port_ref_held(struct intel_digital_port *dig_port);
+
+void intel_tc_port_init(struct intel_digital_port *dig_port, bool is_legacy);
+
+#endif /* __INTEL_TC_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_tv.c b/drivers/gpu/drm/i915/display/intel_tv.c
index 0a95df6c6a57..b70221f5112a 100644
--- a/drivers/gpu/drm/i915/display/intel_tv.c
+++ b/drivers/gpu/drm/i915/display/intel_tv.c
@@ -37,7 +37,7 @@
#include "i915_drv.h"
#include "intel_connector.h"
-#include "intel_drv.h"
+#include "intel_display_types.h"
#include "intel_hotplug.h"
#include "intel_tv.h"
diff --git a/drivers/gpu/drm/i915/display/intel_vbt_defs.h b/drivers/gpu/drm/i915/display/intel_vbt_defs.h
index 5ddbe71ab423..dfcd156b5094 100644
--- a/drivers/gpu/drm/i915/display/intel_vbt_defs.h
+++ b/drivers/gpu/drm/i915/display/intel_vbt_defs.h
@@ -310,13 +310,13 @@ enum vbt_gmbus_ddi {
DDC_BUS_DDI_F,
ICL_DDC_BUS_DDI_A = 0x1,
ICL_DDC_BUS_DDI_B,
+ TGL_DDC_BUS_DDI_C,
ICL_DDC_BUS_PORT_1 = 0x4,
ICL_DDC_BUS_PORT_2,
ICL_DDC_BUS_PORT_3,
ICL_DDC_BUS_PORT_4,
- MCC_DDC_BUS_DDI_A = 0x1,
- MCC_DDC_BUS_DDI_B,
- MCC_DDC_BUS_DDI_C = 0x4,
+ TGL_DDC_BUS_PORT_5,
+ TGL_DDC_BUS_PORT_6,
};
#define DP_AUX_A 0x40
diff --git a/drivers/gpu/drm/i915/display/intel_vdsc.c b/drivers/gpu/drm/i915/display/intel_vdsc.c
index f413904a3e96..d4fb7f16f9f6 100644
--- a/drivers/gpu/drm/i915/display/intel_vdsc.c
+++ b/drivers/gpu/drm/i915/display/intel_vdsc.c
@@ -9,7 +9,7 @@
#include <drm/i915_drm.h>
#include "i915_drv.h"
-#include "intel_drv.h"
+#include "intel_display_types.h"
#include "intel_vdsc.h"
enum ROW_INDEX_BPP {
@@ -459,17 +459,23 @@ int intel_dp_compute_dsc_params(struct intel_dp *intel_dp,
enum intel_display_power_domain
intel_dsc_power_domain(const struct intel_crtc_state *crtc_state)
{
+ struct drm_i915_private *i915 = to_i915(crtc_state->base.crtc->dev);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
/*
- * On ICL VDSC/joining for eDP transcoder uses a separate power well PW2
- * This requires POWER_DOMAIN_TRANSCODER_EDP_VDSC power domain.
+ * On ICL VDSC/joining for eDP transcoder uses a separate power well,
+ * PW2. This requires POWER_DOMAIN_TRANSCODER_VDSC_PW2 power domain.
* For any other transcoder, VDSC/joining uses the power well associated
* with the pipe/transcoder in use. Hence another reference on the
* transcoder power domain will suffice.
+ *
+ * On TGL we have the same mapping, but for transcoder A (the special
+ * TRANSCODER_EDP is gone).
*/
- if (cpu_transcoder == TRANSCODER_EDP)
- return POWER_DOMAIN_TRANSCODER_EDP_VDSC;
+ if (INTEL_GEN(i915) >= 12 && cpu_transcoder == TRANSCODER_A)
+ return POWER_DOMAIN_TRANSCODER_VDSC_PW2;
+ else if (cpu_transcoder == TRANSCODER_EDP)
+ return POWER_DOMAIN_TRANSCODER_VDSC_PW2;
else
return POWER_DOMAIN_TRANSCODER(cpu_transcoder);
}
diff --git a/drivers/gpu/drm/i915/display/vlv_dsi.c b/drivers/gpu/drm/i915/display/vlv_dsi.c
index e272d826210a..a71b22bdd95b 100644
--- a/drivers/gpu/drm/i915/display/vlv_dsi.c
+++ b/drivers/gpu/drm/i915/display/vlv_dsi.c
@@ -34,7 +34,7 @@
#include "i915_drv.h"
#include "intel_atomic.h"
#include "intel_connector.h"
-#include "intel_drv.h"
+#include "intel_display_types.h"
#include "intel_dsi.h"
#include "intel_fifo_underrun.h"
#include "intel_panel.h"
@@ -84,9 +84,8 @@ void vlv_dsi_wait_for_fifo_empty(struct intel_dsi *intel_dsi, enum port port)
mask = LP_CTRL_FIFO_EMPTY | HS_CTRL_FIFO_EMPTY |
LP_DATA_FIFO_EMPTY | HS_DATA_FIFO_EMPTY;
- if (intel_wait_for_register(&dev_priv->uncore,
- MIPI_GEN_FIFO_STAT(port), mask, mask,
- 100))
+ if (intel_de_wait_for_set(dev_priv, MIPI_GEN_FIFO_STAT(port),
+ mask, 100))
DRM_ERROR("DPI FIFOs are not empty\n");
}
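This and the following hunks mechanically replace intel_wait_for_register() calls where value == mask ("set") or value == 0 ("clear"). The new helpers are presumably thin wrappers along these lines; a sketch inferred from the substitutions in this diff, not the verbatim header definitions:

static inline int intel_de_wait_for_set(struct drm_i915_private *i915,
					i915_reg_t reg, u32 mask,
					unsigned int timeout)
{
	return intel_wait_for_register(&i915->uncore, reg, mask, mask, timeout);
}

static inline int intel_de_wait_for_clear(struct drm_i915_private *i915,
					  i915_reg_t reg, u32 mask,
					  unsigned int timeout)
{
	return intel_wait_for_register(&i915->uncore, reg, mask, 0, timeout);
}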
@@ -154,10 +153,8 @@ static ssize_t intel_dsi_host_transfer(struct mipi_dsi_host *host,
/* note: this is never true for reads */
if (packet.payload_length) {
- if (intel_wait_for_register(&dev_priv->uncore,
- MIPI_GEN_FIFO_STAT(port),
- data_mask, 0,
- 50))
+ if (intel_de_wait_for_clear(dev_priv, MIPI_GEN_FIFO_STAT(port),
+ data_mask, 50))
DRM_ERROR("Timeout waiting for HS/LP DATA FIFO !full\n");
write_data(dev_priv, data_reg, packet.payload,
@@ -168,10 +165,8 @@ static ssize_t intel_dsi_host_transfer(struct mipi_dsi_host *host,
I915_WRITE(MIPI_INTR_STAT(port), GEN_READ_DATA_AVAIL);
}
- if (intel_wait_for_register(&dev_priv->uncore,
- MIPI_GEN_FIFO_STAT(port),
- ctrl_mask, 0,
- 50)) {
+ if (intel_de_wait_for_clear(dev_priv, MIPI_GEN_FIFO_STAT(port),
+ ctrl_mask, 50)) {
DRM_ERROR("Timeout waiting for HS/LP CTRL FIFO !full\n");
}
@@ -180,10 +175,8 @@ static ssize_t intel_dsi_host_transfer(struct mipi_dsi_host *host,
/* ->rx_len is set only for reads */
if (msg->rx_len) {
data_mask = GEN_READ_DATA_AVAIL;
- if (intel_wait_for_register(&dev_priv->uncore,
- MIPI_INTR_STAT(port),
- data_mask, data_mask,
- 50))
+ if (intel_de_wait_for_set(dev_priv, MIPI_INTR_STAT(port),
+ data_mask, 50))
DRM_ERROR("Timeout waiting for read data.\n");
read_data(dev_priv, data_reg, msg->rx_buf, msg->rx_len);
@@ -240,9 +233,7 @@ static int dpi_send_cmd(struct intel_dsi *intel_dsi, u32 cmd, bool hs,
I915_WRITE(MIPI_DPI_CONTROL(port), cmd);
mask = SPL_PKT_SENT_INTERRUPT;
- if (intel_wait_for_register(&dev_priv->uncore,
- MIPI_INTR_STAT(port), mask, mask,
- 100))
+ if (intel_de_wait_for_set(dev_priv, MIPI_INTR_STAT(port), mask, 100))
DRM_ERROR("Video mode command 0x%08x send failed.\n", cmd);
return 0;
@@ -359,11 +350,8 @@ static bool glk_dsi_enable_io(struct intel_encoder *encoder)
/* Wait for Pwr ACK */
for_each_dsi_port(port, intel_dsi->ports) {
- if (intel_wait_for_register(&dev_priv->uncore,
- MIPI_CTRL(port),
- GLK_MIPIIO_PORT_POWERED,
- GLK_MIPIIO_PORT_POWERED,
- 20))
+ if (intel_de_wait_for_set(dev_priv, MIPI_CTRL(port),
+ GLK_MIPIIO_PORT_POWERED, 20))
DRM_ERROR("MIPIO port is powergated\n");
}
@@ -385,11 +373,8 @@ static void glk_dsi_device_ready(struct intel_encoder *encoder)
/* Wait for MIPI PHY status bit to set */
for_each_dsi_port(port, intel_dsi->ports) {
- if (intel_wait_for_register(&dev_priv->uncore,
- MIPI_CTRL(port),
- GLK_PHY_STATUS_PORT_READY,
- GLK_PHY_STATUS_PORT_READY,
- 20))
+ if (intel_de_wait_for_set(dev_priv, MIPI_CTRL(port),
+ GLK_PHY_STATUS_PORT_READY, 20))
DRM_ERROR("PHY is not ON\n");
}
@@ -413,11 +398,8 @@ static void glk_dsi_device_ready(struct intel_encoder *encoder)
I915_WRITE(MIPI_DEVICE_READY(port), val);
/* Wait for ULPS active */
- if (intel_wait_for_register(&dev_priv->uncore,
- MIPI_CTRL(port),
- GLK_ULPS_NOT_ACTIVE,
- 0,
- 20))
+ if (intel_de_wait_for_clear(dev_priv, MIPI_CTRL(port),
+ GLK_ULPS_NOT_ACTIVE, 20))
DRM_ERROR("ULPS not active\n");
/* Exit ULPS */
@@ -440,21 +422,15 @@ static void glk_dsi_device_ready(struct intel_encoder *encoder)
/* Wait for Stop state */
for_each_dsi_port(port, intel_dsi->ports) {
- if (intel_wait_for_register(&dev_priv->uncore,
- MIPI_CTRL(port),
- GLK_DATA_LANE_STOP_STATE,
- GLK_DATA_LANE_STOP_STATE,
- 20))
+ if (intel_de_wait_for_set(dev_priv, MIPI_CTRL(port),
+ GLK_DATA_LANE_STOP_STATE, 20))
DRM_ERROR("Date lane not in STOP state\n");
}
/* Wait for AFE LATCH */
for_each_dsi_port(port, intel_dsi->ports) {
- if (intel_wait_for_register(&dev_priv->uncore,
- BXT_MIPI_PORT_CTRL(port),
- AFE_LATCHOUT,
- AFE_LATCHOUT,
- 20))
+ if (intel_de_wait_for_set(dev_priv, BXT_MIPI_PORT_CTRL(port),
+ AFE_LATCHOUT, 20))
DRM_ERROR("D-PHY not entering LP-11 state\n");
}
}
@@ -554,17 +530,15 @@ static void glk_dsi_enter_low_power_mode(struct intel_encoder *encoder)
/* Wait for MIPI PHY status bit to unset */
for_each_dsi_port(port, intel_dsi->ports) {
- if (intel_wait_for_register(&dev_priv->uncore,
- MIPI_CTRL(port),
- GLK_PHY_STATUS_PORT_READY, 0, 20))
+ if (intel_de_wait_for_clear(dev_priv, MIPI_CTRL(port),
+ GLK_PHY_STATUS_PORT_READY, 20))
DRM_ERROR("PHY is not turning OFF\n");
}
/* Wait for Pwr ACK bit to unset */
for_each_dsi_port(port, intel_dsi->ports) {
- if (intel_wait_for_register(&dev_priv->uncore,
- MIPI_CTRL(port),
- GLK_MIPIIO_PORT_POWERED, 0, 20))
+ if (intel_de_wait_for_clear(dev_priv, MIPI_CTRL(port),
+ GLK_MIPIIO_PORT_POWERED, 20))
DRM_ERROR("MIPI IO Port is not powergated\n");
}
}
@@ -583,9 +557,8 @@ static void glk_dsi_disable_mipi_io(struct intel_encoder *encoder)
/* Wait for MIPI PHY status bit to unset */
for_each_dsi_port(port, intel_dsi->ports) {
- if (intel_wait_for_register(&dev_priv->uncore,
- MIPI_CTRL(port),
- GLK_PHY_STATUS_PORT_READY, 0, 20))
+ if (intel_de_wait_for_clear(dev_priv, MIPI_CTRL(port),
+ GLK_PHY_STATUS_PORT_READY, 20))
DRM_ERROR("PHY is not turning OFF\n");
}
@@ -633,9 +606,8 @@ static void vlv_dsi_clear_device_ready(struct intel_encoder *encoder)
* Port A only. MIPI Port C has no similar bit for checking.
*/
if ((IS_GEN9_LP(dev_priv) || port == PORT_A) &&
- intel_wait_for_register(&dev_priv->uncore,
- port_ctrl, AFE_LATCHOUT, 0,
- 30))
+ intel_de_wait_for_clear(dev_priv, port_ctrl,
+ AFE_LATCHOUT, 30))
DRM_ERROR("DSI LP not going Low\n");
/* Disable MIPI PHY transparent latch */
@@ -1644,7 +1616,7 @@ vlv_dsi_get_panel_orientation(struct intel_connector *connector)
return intel_dsi_get_panel_orientation(connector);
}
-static void intel_dsi_add_properties(struct intel_connector *connector)
+static void vlv_dsi_add_properties(struct intel_connector *connector)
{
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
@@ -1983,7 +1955,7 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv)
intel_panel_init(&intel_connector->panel, fixed_mode, NULL);
intel_panel_setup_backlight(connector, INVALID_PIPE);
- intel_dsi_add_properties(intel_connector);
+ vlv_dsi_add_properties(intel_connector);
return;
diff --git a/drivers/gpu/drm/i915/display/vlv_dsi_pll.c b/drivers/gpu/drm/i915/display/vlv_dsi_pll.c
index f016a776a39e..95f39cd0ce02 100644
--- a/drivers/gpu/drm/i915/display/vlv_dsi_pll.c
+++ b/drivers/gpu/drm/i915/display/vlv_dsi_pll.c
@@ -28,7 +28,7 @@
#include <linux/kernel.h>
#include "i915_drv.h"
-#include "intel_drv.h"
+#include "intel_display_types.h"
#include "intel_dsi.h"
#include "intel_sideband.h"
@@ -246,11 +246,8 @@ void bxt_dsi_pll_disable(struct intel_encoder *encoder)
* PLL lock should deassert within 200us.
* Wait up to 1ms before timing out.
*/
- if (intel_wait_for_register(&dev_priv->uncore,
- BXT_DSI_PLL_ENABLE,
- BXT_DSI_PLL_LOCKED,
- 0,
- 1))
+ if (intel_de_wait_for_clear(dev_priv, BXT_DSI_PLL_ENABLE,
+ BXT_DSI_PLL_LOCKED, 1))
DRM_ERROR("Timeout waiting for PLL lock deassertion\n");
}
@@ -530,11 +527,8 @@ void bxt_dsi_pll_enable(struct intel_encoder *encoder,
I915_WRITE(BXT_DSI_PLL_ENABLE, val);
/* Timeout and fail if PLL not locked */
- if (intel_wait_for_register(&dev_priv->uncore,
- BXT_DSI_PLL_ENABLE,
- BXT_DSI_PLL_LOCKED,
- BXT_DSI_PLL_LOCKED,
- 1)) {
+ if (intel_de_wait_for_set(dev_priv, BXT_DSI_PLL_ENABLE,
+ BXT_DSI_PLL_LOCKED, 1)) {
DRM_ERROR("Timed out waiting for DSI PLL to lock\n");
return;
}
diff --git a/drivers/gpu/drm/i915/gem/Makefile b/drivers/gpu/drm/i915/gem/Makefile
index 07e7b8b840ea..7e73aa587967 100644
--- a/drivers/gpu/drm/i915/gem/Makefile
+++ b/drivers/gpu/drm/i915/gem/Makefile
@@ -1 +1,5 @@
-include $(src)/Makefile.header-test # Extra header tests
+# For building individual subdir files on the command line
+subdir-ccflags-y += -I$(srctree)/$(src)/..
+
+# Extra header tests
+header-test-pattern-$(CONFIG_DRM_I915_WERROR) := *.h
diff --git a/drivers/gpu/drm/i915/gem/Makefile.header-test b/drivers/gpu/drm/i915/gem/Makefile.header-test
deleted file mode 100644
index 61e06cbb4b32..000000000000
--- a/drivers/gpu/drm/i915/gem/Makefile.header-test
+++ /dev/null
@@ -1,16 +0,0 @@
-# SPDX-License-Identifier: MIT
-# Copyright © 2019 Intel Corporation
-
-# Test the headers are compilable as standalone units
-header_test := $(notdir $(wildcard $(src)/*.h))
-
-quiet_cmd_header_test = HDRTEST $@
- cmd_header_test = echo "\#include \"$(<F)\"" > $@
-
-header_test_%.c: %.h
- $(call cmd,header_test)
-
-extra-$(CONFIG_DRM_I915_WERROR) += \
- $(foreach h,$(header_test),$(patsubst %.h,header_test_%.o,$(h)))
-
-clean-files += $(foreach h,$(header_test),$(patsubst %.h,header_test_%.c,$(h)))
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_busy.c b/drivers/gpu/drm/i915/gem/i915_gem_busy.c
index 6ad93a09968c..3d4f5775a4ba 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_busy.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_busy.c
@@ -82,7 +82,7 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
{
struct drm_i915_gem_busy *args = data;
struct drm_i915_gem_object *obj;
- struct reservation_object_list *list;
+ struct dma_resv_list *list;
unsigned int seq;
int err;
@@ -105,7 +105,7 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
* Alternatively, we can trade that extra information on read/write
* activity with
* args->busy =
- * !reservation_object_test_signaled_rcu(obj->resv, true);
+ * !dma_resv_test_signaled_rcu(obj->resv, true);
* to report the overall busyness. This is what the wait-ioctl does.
*
*/
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_clflush.c b/drivers/gpu/drm/i915/gem/i915_gem_clflush.c
index 5295285d5843..b9f504ba3b32 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_clflush.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_clflush.c
@@ -8,87 +8,67 @@
#include "i915_drv.h"
#include "i915_gem_clflush.h"
-
-static DEFINE_SPINLOCK(clflush_lock);
+#include "i915_sw_fence_work.h"
+#include "i915_trace.h"
struct clflush {
- struct dma_fence dma; /* Must be first for dma_fence_free() */
- struct i915_sw_fence wait;
- struct work_struct work;
+ struct dma_fence_work base;
struct drm_i915_gem_object *obj;
};
-static const char *i915_clflush_get_driver_name(struct dma_fence *fence)
-{
- return DRIVER_NAME;
-}
-
-static const char *i915_clflush_get_timeline_name(struct dma_fence *fence)
-{
- return "clflush";
-}
-
-static void i915_clflush_release(struct dma_fence *fence)
-{
- struct clflush *clflush = container_of(fence, typeof(*clflush), dma);
-
- i915_sw_fence_fini(&clflush->wait);
-
- BUILD_BUG_ON(offsetof(typeof(*clflush), dma));
- dma_fence_free(&clflush->dma);
-}
-
-static const struct dma_fence_ops i915_clflush_ops = {
- .get_driver_name = i915_clflush_get_driver_name,
- .get_timeline_name = i915_clflush_get_timeline_name,
- .release = i915_clflush_release,
-};
-
-static void __i915_do_clflush(struct drm_i915_gem_object *obj)
+static void __do_clflush(struct drm_i915_gem_object *obj)
{
GEM_BUG_ON(!i915_gem_object_has_pages(obj));
drm_clflush_sg(obj->mm.pages);
- intel_fb_obj_flush(obj, ORIGIN_CPU);
+ intel_frontbuffer_flush(obj->frontbuffer, ORIGIN_CPU);
}
-static void i915_clflush_work(struct work_struct *work)
+static int clflush_work(struct dma_fence_work *base)
{
- struct clflush *clflush = container_of(work, typeof(*clflush), work);
- struct drm_i915_gem_object *obj = clflush->obj;
-
- if (i915_gem_object_pin_pages(obj)) {
- DRM_ERROR("Failed to acquire obj->pages for clflushing\n");
- goto out;
- }
+ struct clflush *clflush = container_of(base, typeof(*clflush), base);
+ struct drm_i915_gem_object *obj = fetch_and_zero(&clflush->obj);
+ int err;
- __i915_do_clflush(obj);
+ err = i915_gem_object_pin_pages(obj);
+ if (err)
+ goto put;
+ __do_clflush(obj);
i915_gem_object_unpin_pages(obj);
-out:
+put:
i915_gem_object_put(obj);
+ return err;
+}
+
+static void clflush_release(struct dma_fence_work *base)
+{
+ struct clflush *clflush = container_of(base, typeof(*clflush), base);
- dma_fence_signal(&clflush->dma);
- dma_fence_put(&clflush->dma);
+ if (clflush->obj)
+ i915_gem_object_put(clflush->obj);
}
-static int __i915_sw_fence_call
-i915_clflush_notify(struct i915_sw_fence *fence,
- enum i915_sw_fence_notify state)
+static const struct dma_fence_work_ops clflush_ops = {
+ .name = "clflush",
+ .work = clflush_work,
+ .release = clflush_release,
+};
+
+static struct clflush *clflush_work_create(struct drm_i915_gem_object *obj)
{
- struct clflush *clflush = container_of(fence, typeof(*clflush), wait);
+ struct clflush *clflush;
- switch (state) {
- case FENCE_COMPLETE:
- schedule_work(&clflush->work);
- break;
+ GEM_BUG_ON(!obj->cache_dirty);
- case FENCE_FREE:
- dma_fence_put(&clflush->dma);
- break;
- }
+ clflush = kmalloc(sizeof(*clflush), GFP_KERNEL);
+ if (!clflush)
+ return NULL;
- return NOTIFY_DONE;
+ dma_fence_work_init(&clflush->base, &clflush_ops);
+ clflush->obj = i915_gem_object_get(obj); /* obj <-> clflush cycle */
+
+ return clflush;
}
bool i915_gem_clflush_object(struct drm_i915_gem_object *obj,
@@ -126,33 +106,16 @@ bool i915_gem_clflush_object(struct drm_i915_gem_object *obj,
clflush = NULL;
if (!(flags & I915_CLFLUSH_SYNC))
- clflush = kmalloc(sizeof(*clflush), GFP_KERNEL);
+ clflush = clflush_work_create(obj);
if (clflush) {
- GEM_BUG_ON(!obj->cache_dirty);
-
- dma_fence_init(&clflush->dma,
- &i915_clflush_ops,
- &clflush_lock,
- to_i915(obj->base.dev)->mm.unordered_timeline,
- 0);
- i915_sw_fence_init(&clflush->wait, i915_clflush_notify);
-
- clflush->obj = i915_gem_object_get(obj);
- INIT_WORK(&clflush->work, i915_clflush_work);
-
- dma_fence_get(&clflush->dma);
-
- i915_sw_fence_await_reservation(&clflush->wait,
- obj->base.resv, NULL,
- true, I915_FENCE_TIMEOUT,
+ i915_sw_fence_await_reservation(&clflush->base.chain,
+ obj->base.resv, NULL, true,
+ I915_FENCE_TIMEOUT,
I915_FENCE_GFP);
-
- reservation_object_add_excl_fence(obj->base.resv,
- &clflush->dma);
-
- i915_sw_fence_commit(&clflush->wait);
+ dma_resv_add_excl_fence(obj->base.resv, &clflush->base.dma);
+ dma_fence_work_commit(&clflush->base);
} else if (obj->mm.pages) {
- __i915_do_clflush(obj);
+ __do_clflush(obj);
} else {
GEM_BUG_ON(obj->write_domain != I915_GEM_DOMAIN_CPU);
}
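The boilerplate this hunk deletes (hand-rolled dma_fence ops plus an i915_sw_fence notifier) is what the new dma_fence_work helper factors out. The resulting usage pattern, as a kernel-style sketch built only from the calls visible in this diff; my_async_op, my_work_fn and my_release_fn are placeholder names:

struct my_async_op {
	struct dma_fence_work base; /* must come first, as in struct clflush */
	struct drm_i915_gem_object *obj;
};

static const struct dma_fence_work_ops my_ops = {
	.name = "example",
	.work = my_work_fn,	  /* int (*)(struct dma_fence_work *), runs after base.chain signals */
	.release = my_release_fn, /* drops any references still held */
};

w = kmalloc(sizeof(*w), GFP_KERNEL);
dma_fence_work_init(&w->base, &my_ops);
/* Order the work behind the fences already on the object: */
i915_sw_fence_await_reservation(&w->base.chain, obj->base.resv, NULL,
				true, I915_FENCE_TIMEOUT, I915_FENCE_GFP);
/* Publish the work's fence so later users wait on it: */
dma_resv_add_excl_fence(obj->base.resv, &w->base.dma);
dma_fence_work_commit(&w->base);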
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c b/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c
index 1fdab0767a47..f99920652751 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c
@@ -2,10 +2,13 @@
/*
* Copyright © 2019 Intel Corporation
*/
-#include "i915_gem_client_blt.h"
+#include "i915_drv.h"
+#include "gt/intel_context.h"
+#include "gt/intel_engine_pm.h"
+#include "gt/intel_engine_pool.h"
+#include "i915_gem_client_blt.h"
#include "i915_gem_object_blt.h"
-#include "intel_drv.h"
struct i915_sleeve {
struct i915_vma *vma;
@@ -72,7 +75,6 @@ static struct i915_sleeve *create_sleeve(struct i915_address_space *vm,
vma->ops = &proxy_vma_ops;
sleeve->vma = vma;
- sleeve->obj = i915_gem_object_get(obj);
sleeve->pages = pages;
sleeve->page_sizes = *page_sizes;
@@ -85,7 +87,6 @@ err_free:
static void destroy_sleeve(struct i915_sleeve *sleeve)
{
- i915_gem_object_put(sleeve->obj);
kfree(sleeve);
}
@@ -154,21 +155,23 @@ static void clear_pages_dma_fence_cb(struct dma_fence *fence,
static void clear_pages_worker(struct work_struct *work)
{
struct clear_pages_work *w = container_of(work, typeof(*w), work);
- struct drm_i915_private *i915 = w->ce->gem_context->i915;
- struct drm_i915_gem_object *obj = w->sleeve->obj;
+ struct drm_i915_private *i915 = w->ce->engine->i915;
+ struct drm_i915_gem_object *obj = w->sleeve->vma->obj;
struct i915_vma *vma = w->sleeve->vma;
struct i915_request *rq;
+ struct i915_vma *batch;
int err = w->dma.error;
if (unlikely(err))
goto out_signal;
if (obj->cache_dirty) {
- obj->write_domain = 0;
if (i915_gem_object_has_struct_page(obj))
drm_clflush_sg(w->sleeve->pages);
obj->cache_dirty = false;
}
+ obj->read_domains = I915_GEM_GPU_DOMAINS;
+ obj->write_domain = 0;
/* XXX: we need to kill this */
mutex_lock(&i915->drm.struct_mutex);
@@ -176,10 +179,16 @@ static void clear_pages_worker(struct work_struct *work)
if (unlikely(err))
goto out_unlock;
- rq = i915_request_create(w->ce);
+ batch = intel_emit_vma_fill_blt(w->ce, vma, w->value);
+ if (IS_ERR(batch)) {
+ err = PTR_ERR(batch);
+ goto out_unpin;
+ }
+
+ rq = intel_context_create_request(w->ce);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
- goto out_unpin;
+ goto out_batch;
}
/* There's no way the fence has signalled */
@@ -187,20 +196,28 @@ static void clear_pages_worker(struct work_struct *work)
clear_pages_dma_fence_cb))
GEM_BUG_ON(1);
+ err = intel_emit_vma_mark_active(batch, rq);
+ if (unlikely(err))
+ goto out_request;
+
if (w->ce->engine->emit_init_breadcrumb) {
err = w->ce->engine->emit_init_breadcrumb(rq);
if (unlikely(err))
goto out_request;
}
- /* XXX: more feverish nightmares await */
- i915_vma_lock(vma);
- err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
- i915_vma_unlock(vma);
+ /*
+ * w->dma is already exported via (vma|obj)->resv; we need only
+ * keep track of the GPU activity within this vma/request, and
+ * propagate the signal from the request to w->dma.
+ */
+ err = i915_active_ref(&vma->active, rq->timeline, rq);
if (err)
goto out_request;
- err = intel_emit_vma_fill_blt(rq, vma, w->value);
+ err = w->ce->engine->emit_bb_start(rq,
+ batch->node.start, batch->node.size,
+ 0);
out_request:
if (unlikely(err)) {
i915_request_skip(rq, err);
@@ -208,6 +225,8 @@ out_request:
}
i915_request_add(rq);
+out_batch:
+ intel_emit_vma_release(w->ce, batch);
out_unpin:
i915_vma_unpin(vma);
out_unlock:
@@ -248,14 +267,11 @@ int i915_gem_schedule_fill_pages_blt(struct drm_i915_gem_object *obj,
struct i915_page_sizes *page_sizes,
u32 value)
{
- struct drm_i915_private *i915 = to_i915(obj->base.dev);
- struct i915_gem_context *ctx = ce->gem_context;
- struct i915_address_space *vm = ctx->vm ?: &i915->ggtt.vm;
struct clear_pages_work *work;
struct i915_sleeve *sleeve;
int err;
- sleeve = create_sleeve(vm, obj, pages, page_sizes);
+ sleeve = create_sleeve(ce->vm, obj, pages, page_sizes);
if (IS_ERR(sleeve))
return PTR_ERR(sleeve);
@@ -273,11 +289,7 @@ int i915_gem_schedule_fill_pages_blt(struct drm_i915_gem_object *obj,
init_irq_work(&work->irq_work, clear_pages_signal_irq_worker);
- dma_fence_init(&work->dma,
- &clear_pages_work_ops,
- &fence_lock,
- i915->mm.unordered_timeline,
- 0);
+ dma_fence_init(&work->dma, &clear_pages_work_ops, &fence_lock, 0, 0);
i915_sw_fence_init(&work->wait, clear_pages_work_notify);
i915_gem_object_lock(obj);
@@ -288,7 +300,7 @@ int i915_gem_schedule_fill_pages_blt(struct drm_i915_gem_object *obj,
if (err < 0) {
dma_fence_set_error(&work->dma, err);
} else {
- reservation_object_add_excl_fence(obj->base.resv, &work->dma);
+ dma_resv_add_excl_fence(obj->base.resv, &work->dma);
err = 0;
}
i915_gem_object_unlock(obj);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c
index 0f2c22a3bcb6..1cdfe05514c3 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c
@@ -70,6 +70,7 @@
#include <drm/i915_drm.h>
#include "gt/intel_lrc_reg.h"
+#include "gt/intel_engine_user.h"
#include "i915_gem_context.h"
#include "i915_globals.h"
@@ -158,7 +159,7 @@ lookup_user_engine(struct i915_gem_context *ctx,
if (!engine)
return ERR_PTR(-EINVAL);
- idx = engine->id;
+ idx = engine->legacy_idx;
} else {
idx = ci->engine_instance;
}
@@ -172,7 +173,9 @@ static inline int new_hw_id(struct drm_i915_private *i915, gfp_t gfp)
lockdep_assert_held(&i915->contexts.mutex);
- if (INTEL_GEN(i915) >= 11)
+ if (INTEL_GEN(i915) >= 12)
+ max = GEN12_MAX_CONTEXT_HW_ID;
+ else if (INTEL_GEN(i915) >= 11)
max = GEN11_MAX_CONTEXT_HW_ID;
else if (USES_GUC_SUBMISSION(i915))
/*
@@ -278,6 +281,7 @@ static void free_engines_rcu(struct rcu_head *rcu)
static struct i915_gem_engines *default_engines(struct i915_gem_context *ctx)
{
+ const struct intel_gt *gt = &ctx->i915->gt;
struct intel_engine_cs *engine;
struct i915_gem_engines *e;
enum intel_engine_id id;
@@ -287,7 +291,7 @@ static struct i915_gem_engines *default_engines(struct i915_gem_context *ctx)
return ERR_PTR(-ENOMEM);
init_rcu_head(&e->rcu);
- for_each_engine(engine, ctx->i915, id) {
+ for_each_engine(engine, gt, id) {
struct intel_context *ce;
ce = intel_context_create(ctx, engine);
@@ -297,8 +301,8 @@ static struct i915_gem_engines *default_engines(struct i915_gem_context *ctx)
}
e->engines[id] = ce;
+ e->num_engines = id + 1;
}
- e->num_engines = id;
return e;
}
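Why the counting moved inside the loop: after a for_each_engine() walk the loop index ends at the iteration bound, not at the last populated slot, so recording id + 1 per engine makes num_engines the highest populated index plus one even when engine ids are sparse. A standalone sketch of the difference, with illustrative ids:

#include <stdio.h>

#define NUM_SLOTS 8

int main(void)
{
	const void *engines[NUM_SLOTS] = { 0 };
	const int present[] = { 0, 2, 5 }; /* sparse engine ids */
	int num_engines = 0;
	int id;

	for (id = 0; id < NUM_SLOTS; id++) {
		for (unsigned int i = 0; i < 3; i++) {
			if (present[i] == id) {
				engines[id] = &present[i];
				num_engines = id + 1; /* track inside the loop */
			}
		}
	}
	/* After the loop: id == 8 (the bound), num_engines == 6 (slots 0..5). */
	printf("loop bound %d vs num_engines %d\n", id, num_engines);
	return 0;
}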
@@ -316,7 +320,7 @@ static void i915_gem_context_free(struct i915_gem_context *ctx)
mutex_destroy(&ctx->engines_mutex);
if (ctx->timeline)
- i915_timeline_put(ctx->timeline);
+ intel_timeline_put(ctx->timeline);
kfree(ctx->name);
put_pid(ctx->pid);
@@ -397,30 +401,6 @@ static void context_close(struct i915_gem_context *ctx)
i915_gem_context_put(ctx);
}
-static u32 default_desc_template(const struct drm_i915_private *i915,
- const struct i915_address_space *vm)
-{
- u32 address_mode;
- u32 desc;
-
- desc = GEN8_CTX_VALID | GEN8_CTX_PRIVILEGE;
-
- address_mode = INTEL_LEGACY_32B_CONTEXT;
- if (vm && i915_vm_is_4lvl(vm))
- address_mode = INTEL_LEGACY_64B_CONTEXT;
- desc |= address_mode << GEN8_CTX_ADDRESSING_MODE_SHIFT;
-
- if (IS_GEN(i915, 8))
- desc |= GEN8_CTX_L3LLC_COHERENT;
-
- /* TODO: WaDisableLiteRestore when we start using semaphore
- * signalling between Command Streamers
- * ring->ctx_desc_template |= GEN8_CTX_FORCE_RESTORE;
- */
-
- return desc;
-}
-
static struct i915_gem_context *
__create_context(struct drm_i915_private *i915)
{
@@ -458,10 +438,6 @@ __create_context(struct drm_i915_private *i915)
i915_gem_context_set_bannable(ctx);
i915_gem_context_set_recoverable(ctx);
- ctx->ring_size = 4 * PAGE_SIZE;
- ctx->desc_template =
- default_desc_template(i915, &i915->mm.aliasing_ppgtt->vm);
-
for (i = 0; i < ARRAY_SIZE(ctx->hang_timestamp); i++)
ctx->hang_timestamp[i] = jiffies - CONTEXT_FAST_HANG_JIFFIES;
@@ -472,13 +448,34 @@ err_free:
return ERR_PTR(err);
}
+static void
+context_apply_all(struct i915_gem_context *ctx,
+ void (*fn)(struct intel_context *ce, void *data),
+ void *data)
+{
+ struct i915_gem_engines_iter it;
+ struct intel_context *ce;
+
+ for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it)
+ fn(ce, data);
+ i915_gem_context_unlock_engines(ctx);
+}
+
+static void __apply_ppgtt(struct intel_context *ce, void *vm)
+{
+ i915_vm_put(ce->vm);
+ ce->vm = i915_vm_get(vm);
+}
+
static struct i915_address_space *
__set_ppgtt(struct i915_gem_context *ctx, struct i915_address_space *vm)
{
struct i915_address_space *old = ctx->vm;
+ GEM_BUG_ON(old && i915_vm_is_4lvl(vm) != i915_vm_is_4lvl(old));
+
ctx->vm = i915_vm_get(vm);
- ctx->desc_template = default_desc_template(ctx->i915, vm);
+ context_apply_all(ctx, __apply_ppgtt, vm);
return old;
}
@@ -494,6 +491,29 @@ static void __assign_ppgtt(struct i915_gem_context *ctx,
i915_vm_put(vm);
}
+static void __set_timeline(struct intel_timeline **dst,
+ struct intel_timeline *src)
+{
+ struct intel_timeline *old = *dst;
+
+ *dst = src ? intel_timeline_get(src) : NULL;
+
+ if (old)
+ intel_timeline_put(old);
+}
+
+static void __apply_timeline(struct intel_context *ce, void *timeline)
+{
+ __set_timeline(&ce->timeline, timeline);
+}
+
+static void __assign_timeline(struct i915_gem_context *ctx,
+ struct intel_timeline *timeline)
+{
+ __set_timeline(&ctx->timeline, timeline);
+ context_apply_all(ctx, __apply_timeline, timeline);
+}
+
static struct i915_gem_context *
i915_gem_create_context(struct drm_i915_private *dev_priv, unsigned int flags)
{
@@ -528,15 +548,16 @@ i915_gem_create_context(struct drm_i915_private *dev_priv, unsigned int flags)
}
if (flags & I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE) {
- struct i915_timeline *timeline;
+ struct intel_timeline *timeline;
- timeline = i915_timeline_create(dev_priv, NULL);
+ timeline = intel_timeline_create(&dev_priv->gt, NULL);
if (IS_ERR(timeline)) {
context_close(ctx);
return ERR_CAST(timeline);
}
- ctx->timeline = timeline;
+ __assign_timeline(ctx, timeline);
+ intel_timeline_put(timeline);
}
trace_i915_context_create(ctx);
@@ -544,53 +565,6 @@ i915_gem_create_context(struct drm_i915_private *dev_priv, unsigned int flags)
return ctx;
}
-/**
- * i915_gem_context_create_gvt - create a GVT GEM context
- * @dev: drm device *
- *
- * This function is used to create a GVT specific GEM context.
- *
- * Returns:
- * pointer to i915_gem_context on success, error pointer if failed
- *
- */
-struct i915_gem_context *
-i915_gem_context_create_gvt(struct drm_device *dev)
-{
- struct i915_gem_context *ctx;
- int ret;
-
- if (!IS_ENABLED(CONFIG_DRM_I915_GVT))
- return ERR_PTR(-ENODEV);
-
- ret = i915_mutex_lock_interruptible(dev);
- if (ret)
- return ERR_PTR(ret);
-
- ctx = i915_gem_create_context(to_i915(dev), 0);
- if (IS_ERR(ctx))
- goto out;
-
- ret = i915_gem_context_pin_hw_id(ctx);
- if (ret) {
- context_close(ctx);
- ctx = ERR_PTR(ret);
- goto out;
- }
-
- ctx->file_priv = ERR_PTR(-EBADF);
- i915_gem_context_set_closed(ctx); /* not user accessible */
- i915_gem_context_clear_bannable(ctx);
- i915_gem_context_set_force_single_submission(ctx);
- if (!USES_GUC_SUBMISSION(to_i915(dev)))
- ctx->ring_size = 512 * PAGE_SIZE; /* Max ring buffer size */
-
- GEM_BUG_ON(i915_gem_context_is_kernel(ctx));
-out:
- mutex_unlock(&dev->struct_mutex);
- return ctx;
-}
-
static void
destroy_kernel_context(struct i915_gem_context **ctxp)
{
@@ -622,7 +596,6 @@ i915_gem_context_create_kernel(struct drm_i915_private *i915, int prio)
i915_gem_context_clear_bannable(ctx);
ctx->sched.priority = I915_USER_PRIORITY(prio);
- ctx->ring_size = PAGE_SIZE;
GEM_BUG_ON(!i915_gem_context_is_kernel(ctx));
@@ -644,20 +617,13 @@ static void init_contexts(struct drm_i915_private *i915)
init_llist_head(&i915->contexts.free_list);
}
-static bool needs_preempt_context(struct drm_i915_private *i915)
-{
- return HAS_EXECLISTS(i915);
-}
-
int i915_gem_contexts_init(struct drm_i915_private *dev_priv)
{
struct i915_gem_context *ctx;
/* Reassure ourselves we are only called once */
GEM_BUG_ON(dev_priv->kernel_context);
- GEM_BUG_ON(dev_priv->preempt_context);
- intel_engine_init_ctx_wa(dev_priv->engine[RCS0]);
init_contexts(dev_priv);
/* lowest priority; idle task */
@@ -677,15 +643,6 @@ int i915_gem_contexts_init(struct drm_i915_private *dev_priv)
GEM_BUG_ON(!atomic_read(&ctx->hw_id_pin_count));
dev_priv->kernel_context = ctx;
- /* highest priority; preempting task */
- if (needs_preempt_context(dev_priv)) {
- ctx = i915_gem_context_create_kernel(dev_priv, INT_MAX);
- if (!IS_ERR(ctx))
- dev_priv->preempt_context = ctx;
- else
- DRM_ERROR("Failed to create preempt context; disabling preemption\n");
- }
-
DRM_DEBUG_DRIVER("%s context support initialized\n",
DRIVER_CAPS(dev_priv)->has_logical_contexts ?
"logical" : "fake");
@@ -696,8 +653,6 @@ void i915_gem_contexts_fini(struct drm_i915_private *i915)
{
lockdep_assert_held(&i915->drm.struct_mutex);
- if (i915->preempt_context)
- destroy_kernel_context(&i915->preempt_context);
destroy_kernel_context(&i915->kernel_context);
/* Must free all deferred contexts (via flush_workqueue) first */
@@ -923,8 +878,12 @@ static int context_barrier_task(struct i915_gem_context *ctx,
if (!cb)
return -ENOMEM;
- i915_active_init(i915, &cb->base, cb_retire);
- i915_active_acquire(&cb->base);
+ i915_active_init(i915, &cb->base, NULL, cb_retire);
+ err = i915_active_acquire(&cb->base);
+ if (err) {
+ kfree(cb);
+ return err;
+ }
for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
struct i915_request *rq;
@@ -951,7 +910,7 @@ static int context_barrier_task(struct i915_gem_context *ctx,
if (emit)
err = emit(rq, data);
if (err == 0)
- err = i915_active_ref(&cb->base, rq->fence.context, rq);
+ err = i915_active_ref(&cb->base, rq->timeline, rq);
i915_request_add(rq);
if (err)
@@ -1019,7 +978,7 @@ static void set_ppgtt_barrier(void *data)
static int emit_ppgtt_update(struct i915_request *rq, void *data)
{
- struct i915_address_space *vm = rq->gem_context->vm;
+ struct i915_address_space *vm = rq->hw_context->vm;
struct intel_engine_cs *engine = rq->engine;
u32 base = engine->mmio_base;
u32 *cs;
@@ -1128,9 +1087,8 @@ static int set_ppgtt(struct drm_i915_file_private *file_priv,
set_ppgtt_barrier,
old);
if (err) {
- ctx->vm = old;
- ctx->desc_template = default_desc_template(ctx->i915, old);
- i915_vm_put(vm);
+ i915_vm_put(__set_ppgtt(ctx, old));
+ i915_vm_put(old);
}
unlock:
@@ -1187,26 +1145,11 @@ gen8_modify_rpcs(struct intel_context *ce, struct intel_sseu sseu)
if (IS_ERR(rq))
return PTR_ERR(rq);
- /* Queue this switch after all other activity by this context. */
- ret = i915_active_request_set(&ce->ring->timeline->last_request, rq);
- if (ret)
- goto out_add;
+ /* Serialise with the remote context */
+ ret = intel_context_prepare_remote_request(ce, rq);
+ if (ret == 0)
+ ret = gen8_emit_rpcs_config(rq, ce, sseu);
- /*
- * Guarantee context image and the timeline remains pinned until the
- * modifying request is retired by setting the ce activity tracker.
- *
- * But we only need to take one pin on the account of it. Or in other
- * words transfer the pinned ce object to tracked active request.
- */
- GEM_BUG_ON(i915_active_is_idle(&ce->active));
- ret = i915_active_ref(&ce->active, rq->fence.context, rq);
- if (ret)
- goto out_add;
-
- ret = gen8_emit_rpcs_config(rq, ce, sseu);
-
-out_add:
i915_request_add(rq);
return ret;
}
@@ -1217,7 +1160,7 @@ __intel_context_reconfigure_sseu(struct intel_context *ce,
{
int ret;
- GEM_BUG_ON(INTEL_GEN(ce->gem_context->i915) < 8);
+ GEM_BUG_ON(INTEL_GEN(ce->engine->i915) < 8);
ret = intel_context_lock_pinned(ce);
if (ret)
@@ -1239,7 +1182,7 @@ unlock:
static int
intel_context_reconfigure_sseu(struct intel_context *ce, struct intel_sseu sseu)
{
- struct drm_i915_private *i915 = ce->gem_context->i915;
+ struct drm_i915_private *i915 = ce->engine->i915;
int ret;
ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
@@ -1636,6 +1579,7 @@ set_engines(struct i915_gem_context *ctx,
for (n = 0; n < num_engines; n++) {
struct i915_engine_class_instance ci;
struct intel_engine_cs *engine;
+ struct intel_context *ce;
if (copy_from_user(&ci, &user->engines[n], sizeof(ci))) {
__free_engines(set.engines, n);
@@ -1658,11 +1602,13 @@ set_engines(struct i915_gem_context *ctx,
return -ENOENT;
}
- set.engines->engines[n] = intel_context_create(ctx, engine);
- if (!set.engines->engines[n]) {
+ ce = intel_context_create(ctx, engine);
+ if (IS_ERR(ce)) {
__free_engines(set.engines, n);
- return -ENOMEM;
+ return PTR_ERR(ce);
}
+
+ set.engines->engines[n] = ce;
}
set.engines->num_engines = num_engines;
@@ -1776,7 +1722,7 @@ get_engines(struct i915_gem_context *ctx,
if (e->engines[n]) {
ci.engine_class = e->engines[n]->engine->uabi_class;
- ci.engine_instance = e->engines[n]->engine->instance;
+ ci.engine_instance = e->engines[n]->engine->uabi_instance;
}
if (copy_to_user(&user->engines[n], &ci, sizeof(ci))) {
@@ -2011,13 +1957,8 @@ unlock:
static int clone_timeline(struct i915_gem_context *dst,
struct i915_gem_context *src)
{
- if (src->timeline) {
- GEM_BUG_ON(src->timeline == dst->timeline);
-
- if (dst->timeline)
- i915_timeline_put(dst->timeline);
- dst->timeline = i915_timeline_get(src->timeline);
- }
+ if (src->timeline)
+ __assign_timeline(dst, src->timeline);
return 0;
}
@@ -2141,7 +2082,7 @@ int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
if (args->flags & I915_CONTEXT_CREATE_FLAGS_UNKNOWN)
return -EINVAL;
- ret = i915_terminally_wedged(i915);
+ ret = intel_gt_terminally_wedged(&i915->gt);
if (ret)
return ret;
@@ -2287,8 +2228,8 @@ int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
args->size = 0;
if (ctx->vm)
args->value = ctx->vm->total;
- else if (to_i915(dev)->mm.aliasing_ppgtt)
- args->value = to_i915(dev)->mm.aliasing_ppgtt->vm.total;
+ else if (to_i915(dev)->ggtt.alias)
+ args->value = to_i915(dev)->ggtt.alias->vm.total;
else
args->value = to_i915(dev)->ggtt.vm.total;
break;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.h b/drivers/gpu/drm/i915/gem/i915_gem_context.h
index 9691dd062f72..176978608b6f 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context.h
@@ -141,8 +141,6 @@ int i915_gem_context_open(struct drm_i915_private *i915,
void i915_gem_context_close(struct drm_file *file);
void i915_gem_context_release(struct kref *ctx_ref);
-struct i915_gem_context *
-i915_gem_context_create_gvt(struct drm_device *dev);
int i915_gem_vm_create_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
@@ -198,12 +196,6 @@ i915_gem_context_unlock_engines(struct i915_gem_context *ctx)
}
static inline struct intel_context *
-i915_gem_context_lookup_engine(struct i915_gem_context *ctx, unsigned int idx)
-{
- return i915_gem_context_engines(ctx)->engines[idx];
-}
-
-static inline struct intel_context *
i915_gem_context_get_engine(struct i915_gem_context *ctx, unsigned int idx)
{
struct intel_context *ce = ERR_PTR(-EINVAL);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context_types.h b/drivers/gpu/drm/i915/gem/i915_gem_context_types.h
index cc513410eeef..260d59cc3de8 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context_types.h
@@ -26,7 +26,7 @@ struct pid;
struct drm_i915_private;
struct drm_i915_file_private;
struct i915_address_space;
-struct i915_timeline;
+struct intel_timeline;
struct intel_ring;
struct i915_gem_engines {
@@ -77,7 +77,7 @@ struct i915_gem_context {
struct i915_gem_engines __rcu *engines;
struct mutex engines_mutex; /* guards writes to engines */
- struct i915_timeline *timeline;
+ struct intel_timeline *timeline;
/**
* @vm: unique address space (GTT)
@@ -169,11 +169,6 @@ struct i915_gem_context {
struct i915_sched_attr sched;
- /** ring_size: size for allocating the per-engine ring buffer */
- u32 ring_size;
- /** desc_template: invariant fields for the HW context descriptor */
- u32 desc_template;
-
/** guilty_count: How many times this context has caused a GPU hang. */
atomic_t guilty_count;
/**
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
index cbf1701d3acc..96ce95c8ac5a 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
@@ -6,7 +6,7 @@
#include <linux/dma-buf.h>
#include <linux/highmem.h>
-#include <linux/reservation.h>
+#include <linux/dma-resv.h>
#include "i915_drv.h"
#include "i915_gem_object.h"
@@ -204,8 +204,7 @@ static const struct dma_buf_ops i915_dmabuf_ops = {
.end_cpu_access = i915_gem_end_cpu_access,
};
-struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
- struct drm_gem_object *gem_obj, int flags)
+struct dma_buf *i915_gem_prime_export(struct drm_gem_object *gem_obj, int flags)
{
struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
@@ -222,7 +221,7 @@ struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
return ERR_PTR(ret);
}
- return drm_gem_dmabuf_export(dev, &exp_info);
+ return drm_gem_dmabuf_export(gem_obj->dev, &exp_info);
}
static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_domain.c b/drivers/gpu/drm/i915/gem/i915_gem_domain.c
index 2e3ce2a69653..9c58e8fac1d9 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_domain.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_domain.c
@@ -221,6 +221,8 @@ restart:
* state and so involves less work.
*/
if (atomic_read(&obj->bind_count)) {
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
+
/* Before we change the PTE, the GPU must not be accessing it.
* If we wait upon the object, we know that all the bound
* VMA are no longer active.
@@ -232,18 +234,30 @@ restart:
if (ret)
return ret;
- if (!HAS_LLC(to_i915(obj->base.dev)) &&
- cache_level != I915_CACHE_NONE) {
- /* Access to snoopable pages through the GTT is
+ if (!HAS_LLC(i915) && cache_level != I915_CACHE_NONE) {
+ intel_wakeref_t wakeref =
+ intel_runtime_pm_get(&i915->runtime_pm);
+
+ /*
+ * Access to snoopable pages through the GTT is
* incoherent and on some machines causes a hard
* lockup. Relinquish the CPU mmapping to force
* userspace to refault in the pages and we can
* then double check if the GTT mapping is still
* valid for that pointer access.
*/
- i915_gem_object_release_mmap(obj);
+ ret = mutex_lock_interruptible(&i915->ggtt.vm.mutex);
+ if (ret) {
+ intel_runtime_pm_put(&i915->runtime_pm,
+ wakeref);
+ return ret;
+ }
+
+ if (obj->userfault_count)
+ __i915_gem_object_release_mmap(obj);
- /* As we no longer need a fence for GTT access,
+ /*
+ * As we no longer need a fence for GTT access,
* we can relinquish it now (and so prevent having
* to steal a fence from someone else on the next
* fence request). Note GPU activity would have
@@ -251,12 +265,17 @@ restart:
* supposed to be linear.
*/
for_each_ggtt_vma(vma, obj) {
- ret = i915_vma_put_fence(vma);
+ ret = i915_vma_revoke_fence(vma);
if (ret)
- return ret;
+ break;
}
+ mutex_unlock(&i915->ggtt.vm.mutex);
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
+ if (ret)
+ return ret;
} else {
- /* We either have incoherent backing store and
+ /*
+ * We either have incoherent backing store and
* so no GTT access or the architecture is fully
* coherent. In such cases, existing GTT mmaps
* ignore the cache bit in the PTE and we can
@@ -551,13 +570,6 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
return 0;
}
-static inline enum fb_op_origin
-fb_write_origin(struct drm_i915_gem_object *obj, unsigned int domain)
-{
- return (domain == I915_GEM_DOMAIN_GTT ?
- obj->frontbuffer_ggtt_origin : ORIGIN_CPU);
-}
-
/**
* Called when user space prepares to use an object with the CPU, either
* through the mmap ioctl's mapping or a GTT mapping.
@@ -661,9 +673,8 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
i915_gem_object_unlock(obj);
- if (write_domain != 0)
- intel_fb_obj_invalidate(obj,
- fb_write_origin(obj, write_domain));
+ if (write_domain)
+ intel_frontbuffer_invalidate(obj->frontbuffer, ORIGIN_CPU);
out_unpin:
i915_gem_object_unpin_pages(obj);
@@ -783,7 +794,7 @@ int i915_gem_object_prepare_write(struct drm_i915_gem_object *obj,
}
out:
- intel_fb_obj_invalidate(obj, ORIGIN_CPU);
+ intel_frontbuffer_invalidate(obj->frontbuffer, ORIGIN_CPU);
obj->mm.dirty = true;
/* return with the pages pinned */
return 0;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index 41dab9ea33cd..b5f6937369ea 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -5,7 +5,7 @@
*/
#include <linux/intel-iommu.h>
-#include <linux/reservation.h>
+#include <linux/dma-resv.h>
#include <linux/sync_file.h>
#include <linux/uaccess.h>
@@ -16,13 +16,15 @@
#include "gem/i915_gem_ioctls.h"
#include "gt/intel_context.h"
+#include "gt/intel_engine_pool.h"
+#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"
-#include "i915_gem_ioctls.h"
+#include "i915_drv.h"
#include "i915_gem_clflush.h"
#include "i915_gem_context.h"
+#include "i915_gem_ioctls.h"
#include "i915_trace.h"
-#include "intel_drv.h"
enum {
FORCE_CPU_RELOC = 1,
@@ -222,7 +224,6 @@ struct i915_execbuffer {
struct intel_engine_cs *engine; /** engine to queue the request to */
struct intel_context *context; /* logical state for the request */
struct i915_gem_context *gem_context; /** caller's context */
- struct i915_address_space *vm; /** GTT and vma for the request */
struct i915_request *request; /** our request to build */
struct i915_vma *batch; /** identity of the batch obj/vma */
@@ -696,7 +697,7 @@ static int eb_reserve(struct i915_execbuffer *eb)
case 1:
/* Too fragmented, unbind everything and retry */
- err = i915_gem_evict_vm(eb->vm);
+ err = i915_gem_evict_vm(eb->context->vm);
if (err)
return err;
break;
@@ -724,12 +725,8 @@ static int eb_select_context(struct i915_execbuffer *eb)
return -ENOENT;
eb->gem_context = ctx;
- if (ctx->vm) {
- eb->vm = ctx->vm;
+ if (ctx->vm)
eb->invalid_flags |= EXEC_OBJECT_NEEDS_GTT;
- } else {
- eb->vm = &eb->i915->ggtt.vm;
- }
eb->context_flags = 0;
if (test_bit(UCONTEXT_NO_ZEROMAP, &ctx->user_flags))
@@ -738,63 +735,6 @@ static int eb_select_context(struct i915_execbuffer *eb)
return 0;
}
-static struct i915_request *__eb_wait_for_ring(struct intel_ring *ring)
-{
- struct i915_request *rq;
-
- /*
- * Completely unscientific finger-in-the-air estimates for suitable
- * maximum user request size (to avoid blocking) and then backoff.
- */
- if (intel_ring_update_space(ring) >= PAGE_SIZE)
- return NULL;
-
- /*
- * Find a request that after waiting upon, there will be at least half
- * the ring available. The hysteresis allows us to compete for the
- * shared ring and should mean that we sleep less often prior to
- * claiming our resources, but not so long that the ring completely
- * drains before we can submit our next request.
- */
- list_for_each_entry(rq, &ring->request_list, ring_link) {
- if (__intel_ring_space(rq->postfix,
- ring->emit, ring->size) > ring->size / 2)
- break;
- }
- if (&rq->ring_link == &ring->request_list)
- return NULL; /* weird, we will check again later for real */
-
- return i915_request_get(rq);
-}
-
-static int eb_wait_for_ring(const struct i915_execbuffer *eb)
-{
- struct i915_request *rq;
- int ret = 0;
-
- /*
- * Apply a light amount of backpressure to prevent excessive hogs
- * from blocking waiting for space whilst holding struct_mutex and
- * keeping all of their resources pinned.
- */
-
- rq = __eb_wait_for_ring(eb->context->ring);
- if (rq) {
- mutex_unlock(&eb->i915->drm.struct_mutex);
-
- if (i915_request_wait(rq,
- I915_WAIT_INTERRUPTIBLE,
- MAX_SCHEDULE_TIMEOUT) < 0)
- ret = -EINTR;
-
- i915_request_put(rq);
-
- mutex_lock(&eb->i915->drm.struct_mutex);
- }
-
- return ret;
-}
-
static int eb_lookup_vmas(struct i915_execbuffer *eb)
{
struct radix_tree_root *handles_vma = &eb->gem_context->handles_vma;
@@ -831,7 +771,7 @@ static int eb_lookup_vmas(struct i915_execbuffer *eb)
goto err_vma;
}
- vma = i915_vma_instance(obj, eb->vm, NULL);
+ vma = i915_vma_instance(obj, eb->context->vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
goto err_obj;
@@ -994,7 +934,7 @@ static void reloc_gpu_flush(struct reloc_cache *cache)
__i915_gem_object_flush_map(cache->rq->batch->obj, 0, cache->rq_size);
i915_gem_object_unpin_map(cache->rq->batch->obj);
- i915_gem_chipset_flush(cache->rq->i915);
+ intel_gt_chipset_flush(cache->rq->engine->gt);
i915_request_add(cache->rq);
cache->rq = NULL;
@@ -1018,11 +958,12 @@ static void reloc_cache_reset(struct reloc_cache *cache)
kunmap_atomic(vaddr);
i915_gem_object_finish_access((struct drm_i915_gem_object *)cache->node.mm);
} else {
- wmb();
+ struct i915_ggtt *ggtt = cache_to_ggtt(cache);
+
+ intel_gt_flush_ggtt_writes(ggtt->vm.gt);
io_mapping_unmap_atomic((void __iomem *)vaddr);
- if (cache->node.allocated) {
- struct i915_ggtt *ggtt = cache_to_ggtt(cache);
+ if (cache->node.allocated) {
ggtt->vm.clear_range(&ggtt->vm,
cache->node.start,
cache->node.size);
@@ -1077,11 +1018,15 @@ static void *reloc_iomap(struct drm_i915_gem_object *obj,
void *vaddr;
if (cache->vaddr) {
+ intel_gt_flush_ggtt_writes(ggtt->vm.gt);
io_mapping_unmap_atomic((void __force __iomem *) unmask_page(cache->vaddr));
} else {
struct i915_vma *vma;
int err;
+ if (i915_gem_object_is_tiled(obj))
+ return ERR_PTR(-EINVAL);
+
if (use_cpu_reloc(cache, obj))
return NULL;
@@ -1093,8 +1038,8 @@ static void *reloc_iomap(struct drm_i915_gem_object *obj,
vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
PIN_MAPPABLE |
- PIN_NONBLOCK |
- PIN_NONFAULT);
+ PIN_NONBLOCK /* NOWARN */ |
+ PIN_NOEVICT);
if (IS_ERR(vma)) {
memset(&cache->node, 0, sizeof(cache->node));
err = drm_mm_insert_node_in_range
@@ -1105,12 +1050,6 @@ static void *reloc_iomap(struct drm_i915_gem_object *obj,
if (err) /* no inactive aperture space, use cpu reloc */
return NULL;
} else {
- err = i915_vma_put_fence(vma);
- if (err) {
- i915_vma_unpin(vma);
- return ERR_PTR(err);
- }
-
cache->node.start = vma->node.start;
cache->node.mm = (void *)vma;
}
@@ -1118,7 +1057,6 @@ static void *reloc_iomap(struct drm_i915_gem_object *obj,
offset = cache->node.start;
if (cache->node.allocated) {
- wmb();
ggtt->vm.insert_page(&ggtt->vm,
i915_gem_object_get_dma_address(obj, page),
offset, I915_CACHE_NONE, 0);
@@ -1201,25 +1139,26 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
unsigned int len)
{
struct reloc_cache *cache = &eb->reloc_cache;
- struct drm_i915_gem_object *obj;
+ struct intel_engine_pool_node *pool;
struct i915_request *rq;
struct i915_vma *batch;
u32 *cmd;
int err;
- obj = i915_gem_batch_pool_get(&eb->engine->batch_pool, PAGE_SIZE);
- if (IS_ERR(obj))
- return PTR_ERR(obj);
+ pool = intel_engine_pool_get(&eb->engine->pool, PAGE_SIZE);
+ if (IS_ERR(pool))
+ return PTR_ERR(pool);
- cmd = i915_gem_object_pin_map(obj,
+ cmd = i915_gem_object_pin_map(pool->obj,
cache->has_llc ?
I915_MAP_FORCE_WB :
I915_MAP_FORCE_WC);
- i915_gem_object_unpin_pages(obj);
- if (IS_ERR(cmd))
- return PTR_ERR(cmd);
+ if (IS_ERR(cmd)) {
+ err = PTR_ERR(cmd);
+ goto out_pool;
+ }
- batch = i915_vma_instance(obj, vma->vm, NULL);
+ batch = i915_vma_instance(pool->obj, vma->vm, NULL);
if (IS_ERR(batch)) {
err = PTR_ERR(batch);
goto err_unmap;
@@ -1235,6 +1174,10 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
goto err_unpin;
}
+ err = intel_engine_pool_mark_active(pool, rq);
+ if (err)
+ goto err_request;
+
err = reloc_move_to_gpu(rq, vma);
if (err)
goto err_request;
@@ -1246,8 +1189,9 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
goto skip_request;
i915_vma_lock(batch);
- GEM_BUG_ON(!reservation_object_test_signaled_rcu(batch->resv, true));
- err = i915_vma_move_to_active(batch, rq, 0);
+ err = i915_request_await_object(rq, batch->obj, false);
+ if (err == 0)
+ err = i915_vma_move_to_active(batch, rq, 0);
i915_vma_unlock(batch);
if (err)
goto skip_request;
@@ -1260,7 +1204,7 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
cache->rq_size = 0;
/* Return with batch mapping (cmd) still pinned */
- return 0;
+ goto out_pool;
skip_request:
i915_request_skip(rq, err);
@@ -1269,7 +1213,9 @@ err_request:
err_unpin:
i915_vma_unpin(batch);
err_unmap:
- i915_gem_object_unpin_map(obj);
+ i915_gem_object_unpin_map(pool->obj);
+out_pool:
+ intel_engine_pool_put(pool);
return err;
}
@@ -1317,7 +1263,7 @@ relocate_entry(struct i915_vma *vma,
if (!eb->reloc_cache.vaddr &&
(DBG_FORCE_RELOC == FORCE_GPU_RELOC ||
- !reservation_object_test_signaled_rcu(vma->resv, true))) {
+ !dma_resv_test_signaled_rcu(vma->resv, true))) {
const unsigned int gen = eb->reloc_cache.gen;
unsigned int len;
u32 *batch;
@@ -1952,7 +1898,7 @@ static int eb_move_to_gpu(struct i915_execbuffer *eb)
eb->exec = NULL;
/* Unconditionally flush any chipset caches (for streaming writes). */
- i915_gem_chipset_flush(eb->i915);
+ intel_gt_chipset_flush(eb->engine->gt);
return 0;
err_skip:
@@ -2011,18 +1957,17 @@ static int i915_reset_gen7_sol_offsets(struct i915_request *rq)
static struct i915_vma *eb_parse(struct i915_execbuffer *eb, bool is_master)
{
- struct drm_i915_gem_object *shadow_batch_obj;
+ struct intel_engine_pool_node *pool;
struct i915_vma *vma;
int err;
- shadow_batch_obj = i915_gem_batch_pool_get(&eb->engine->batch_pool,
- PAGE_ALIGN(eb->batch_len));
- if (IS_ERR(shadow_batch_obj))
- return ERR_CAST(shadow_batch_obj);
+ pool = intel_engine_pool_get(&eb->engine->pool, eb->batch_len);
+ if (IS_ERR(pool))
+ return ERR_CAST(pool);
err = intel_engine_cmd_parser(eb->engine,
eb->batch->obj,
- shadow_batch_obj,
+ pool->obj,
eb->batch_start_offset,
eb->batch_len,
is_master);
@@ -2031,12 +1976,12 @@ static struct i915_vma *eb_parse(struct i915_execbuffer *eb, bool is_master)
vma = NULL;
else
vma = ERR_PTR(err);
- goto out;
+ goto err;
}
- vma = i915_gem_object_ggtt_pin(shadow_batch_obj, NULL, 0, 0, 0);
+ vma = i915_gem_object_ggtt_pin(pool->obj, NULL, 0, 0, 0);
if (IS_ERR(vma))
- goto out;
+ goto err;
eb->vma[eb->buffer_count] = i915_vma_get(vma);
eb->flags[eb->buffer_count] =
@@ -2044,16 +1989,24 @@ static struct i915_vma *eb_parse(struct i915_execbuffer *eb, bool is_master)
vma->exec_flags = &eb->flags[eb->buffer_count];
eb->buffer_count++;
-out:
- i915_gem_object_unpin_pages(shadow_batch_obj);
+ vma->private = pool;
+ return vma;
+
+err:
+ intel_engine_pool_put(pool);
return vma;
}
static void
add_to_client(struct i915_request *rq, struct drm_file *file)
{
- rq->file_priv = file->driver_priv;
- list_add_tail(&rq->client_link, &rq->file_priv->mm.request_list);
+ struct drm_i915_file_private *file_priv = file->driver_priv;
+
+ rq->file_priv = file_priv;
+
+ spin_lock(&file_priv->mm.lock);
+ list_add_tail(&rq->client_link, &file_priv->mm.request_list);
+ spin_unlock(&file_priv->mm.lock);
}
static int eb_submit(struct i915_execbuffer *eb)
@@ -2093,6 +2046,12 @@ static int eb_submit(struct i915_execbuffer *eb)
return 0;
}
+static int num_vcs_engines(const struct drm_i915_private *i915)
+{
+ return hweight64(INTEL_INFO(i915)->engine_mask &
+ GENMASK_ULL(VCS0 + I915_MAX_VCS - 1, VCS0));
+}
+
/*
* Find one BSD ring to dispatch the corresponding BSD command.
* The engine index is returned.
@@ -2105,8 +2064,8 @@ gen8_dispatch_bsd_engine(struct drm_i915_private *dev_priv,
/* Check whether the file_priv has already selected one ring. */
if ((int)file_priv->bsd_engine < 0)
- file_priv->bsd_engine = atomic_fetch_xor(1,
- &dev_priv->mm.bsd_engine_dispatch_index);
+ file_priv->bsd_engine =
+ get_random_int() % num_vcs_engines(dev_priv);
return file_priv->bsd_engine;
}
@@ -2119,15 +2078,80 @@ static const enum intel_engine_id user_ring_map[] = {
[I915_EXEC_VEBOX] = VECS0
};
-static int eb_pin_context(struct i915_execbuffer *eb, struct intel_context *ce)
+static struct i915_request *eb_throttle(struct intel_context *ce)
+{
+ struct intel_ring *ring = ce->ring;
+ struct intel_timeline *tl = ce->timeline;
+ struct i915_request *rq;
+
+ /*
+ * Completely unscientific finger-in-the-air estimates for suitable
+ * maximum user request size (to avoid blocking) and then backoff.
+ */
+ if (intel_ring_update_space(ring) >= PAGE_SIZE)
+ return NULL;
+
+ /*
+ * Find a request such that, after waiting upon it, there will be at
+ * least half the ring available. The hysteresis allows us to compete
+ * for the shared ring and should mean that we sleep less often prior
+ * to claiming our resources, but not so long that the ring completely
+ * drains before we can submit our next request.
+ */
+ list_for_each_entry(rq, &tl->requests, link) {
+ if (rq->ring != ring)
+ continue;
+
+ if (__intel_ring_space(rq->postfix,
+ ring->emit, ring->size) > ring->size / 2)
+ break;
+ }
+ if (&rq->link == &tl->requests)
+ return NULL; /* weird, we will check again later for real */
+
+ return i915_request_get(rq);
+}
+
+static int
+__eb_pin_context(struct i915_execbuffer *eb, struct intel_context *ce)
{
int err;
+ if (likely(atomic_inc_not_zero(&ce->pin_count)))
+ return 0;
+
+ err = mutex_lock_interruptible(&eb->i915->drm.struct_mutex);
+ if (err)
+ return err;
+
+ err = __intel_context_do_pin(ce);
+ mutex_unlock(&eb->i915->drm.struct_mutex);
+
+ return err;
+}
+
+static void
+__eb_unpin_context(struct i915_execbuffer *eb, struct intel_context *ce)
+{
+ if (likely(atomic_add_unless(&ce->pin_count, -1, 1)))
+ return;
+
+ mutex_lock(&eb->i915->drm.struct_mutex);
+ intel_context_unpin(ce);
+ mutex_unlock(&eb->i915->drm.struct_mutex);
+}
+
+static int __eb_pin_engine(struct i915_execbuffer *eb, struct intel_context *ce)
+{
+ struct intel_timeline *tl;
+ struct i915_request *rq;
+ int err;
+
/*
* ABI: Before userspace accesses the GPU (e.g. execbuffer), report
* EIO if the GPU is already wedged.
*/
- err = i915_terminally_wedged(eb->i915);
+ err = intel_gt_terminally_wedged(ce->engine->gt);
if (err)
return err;
@@ -2136,18 +2160,64 @@ static int eb_pin_context(struct i915_execbuffer *eb, struct intel_context *ce)
* GGTT space, so do this first before we reserve a seqno for
* ourselves.
*/
- err = intel_context_pin(ce);
+ err = __eb_pin_context(eb, ce);
if (err)
return err;
+ /*
+ * Take a local wakeref for preparing to dispatch the execbuf as
+ * we expect to access the hardware fairly frequently in the
+ * process, and require the engine to be kept awake between accesses.
+ * Upon dispatch, we acquire another prolonged wakeref that we hold
+ * until the timeline is idle, which in turn releases the wakeref
+ * taken on the engine, and the parent device.
+ */
+ tl = intel_context_timeline_lock(ce);
+ if (IS_ERR(tl)) {
+ err = PTR_ERR(tl);
+ goto err_unpin;
+ }
+
+ intel_context_enter(ce);
+ rq = eb_throttle(ce);
+
+ intel_context_timeline_unlock(tl);
+
+ if (rq) {
+ if (i915_request_wait(rq,
+ I915_WAIT_INTERRUPTIBLE,
+ MAX_SCHEDULE_TIMEOUT) < 0) {
+ i915_request_put(rq);
+ err = -EINTR;
+ goto err_exit;
+ }
+
+ i915_request_put(rq);
+ }
+
eb->engine = ce->engine;
eb->context = ce;
return 0;
+
+err_exit:
+ mutex_lock(&tl->mutex);
+ intel_context_exit(ce);
+ intel_context_timeline_unlock(tl);
+err_unpin:
+ __eb_unpin_context(eb, ce);
+ return err;
}
-static void eb_unpin_context(struct i915_execbuffer *eb)
+static void eb_unpin_engine(struct i915_execbuffer *eb)
{
- intel_context_unpin(eb->context);
+ struct intel_context *ce = eb->context;
+ struct intel_timeline *tl = ce->timeline;
+
+ mutex_lock(&tl->mutex);
+ intel_context_exit(ce);
+ mutex_unlock(&tl->mutex);
+
+ __eb_unpin_context(eb, ce);
}
static unsigned int
@@ -2165,7 +2235,7 @@ eb_select_legacy_ring(struct i915_execbuffer *eb,
return -1;
}
- if (user_ring_id == I915_EXEC_BSD && HAS_ENGINE(i915, VCS1)) {
+ if (user_ring_id == I915_EXEC_BSD && num_vcs_engines(i915) > 1) {
unsigned int bsd_idx = args->flags & I915_EXEC_BSD_MASK;
if (bsd_idx == I915_EXEC_BSD_DEFAULT) {
@@ -2192,9 +2262,9 @@ eb_select_legacy_ring(struct i915_execbuffer *eb,
}
static int
-eb_select_engine(struct i915_execbuffer *eb,
- struct drm_file *file,
- struct drm_i915_gem_execbuffer2 *args)
+eb_pin_engine(struct i915_execbuffer *eb,
+ struct drm_file *file,
+ struct drm_i915_gem_execbuffer2 *args)
{
struct intel_context *ce;
unsigned int idx;
@@ -2209,7 +2279,7 @@ eb_select_engine(struct i915_execbuffer *eb,
if (IS_ERR(ce))
return PTR_ERR(ce);
- err = eb_pin_context(eb, ce);
+ err = __eb_pin_engine(eb, ce);
intel_context_put(ce);
return err;
@@ -2427,25 +2497,12 @@ i915_gem_do_execbuffer(struct drm_device *dev,
if (unlikely(err))
goto err_destroy;
- /*
- * Take a local wakeref for preparing to dispatch the execbuf as
- * we expect to access the hardware fairly frequently in the
- * process. Upon first dispatch, we acquire another prolonged
- * wakeref that we hold until the GPU has been idle for at least
- * 100ms.
- */
- intel_gt_pm_get(eb.i915);
+ err = eb_pin_engine(&eb, file, args);
+ if (unlikely(err))
+ goto err_context;
err = i915_mutex_lock_interruptible(dev);
if (err)
- goto err_rpm;
-
- err = eb_select_engine(&eb, file, args);
- if (unlikely(err))
- goto err_unlock;
-
- err = eb_wait_for_ring(&eb); /* may temporarily drop struct_mutex */
- if (unlikely(err))
goto err_engine;
err = eb_relocate(&eb);
@@ -2572,6 +2629,8 @@ i915_gem_do_execbuffer(struct drm_device *dev,
* to explicitly hold another reference here.
*/
eb.request->batch = eb.batch;
+ if (eb.batch->private)
+ intel_engine_pool_mark_active(eb.batch->private, eb.request);
trace_i915_request_queue(eb.request, eb.batch_flags);
err = eb_submit(&eb);
@@ -2596,15 +2655,15 @@ err_request:
err_batch_unpin:
if (eb.batch_flags & I915_DISPATCH_SECURE)
i915_vma_unpin(eb.batch);
+ if (eb.batch->private)
+ intel_engine_pool_put(eb.batch->private);
err_vma:
if (eb.exec)
eb_release_vmas(&eb);
-err_engine:
- eb_unpin_context(&eb);
-err_unlock:
mutex_unlock(&dev->struct_mutex);
-err_rpm:
- intel_gt_pm_put(eb.i915);
+err_engine:
+ eb_unpin_engine(&eb);
+err_context:
i915_gem_context_put(eb.gem_context);
err_destroy:
eb_destroy(&eb);
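Most of the execbuffer churn above moves throttling from eb_wait_for_ring() to eb_throttle(), run under the context timeline rather than struct_mutex. The heuristic itself is small enough to model in plain C; the sketch below uses simplified stand-ins for the i915 ring and request structures and assumes a power-of-two ring size, so treat it as illustrative only:

#include <stddef.h>
#include <stdio.h>

struct request { unsigned int postfix; struct request *next; };
struct ring { unsigned int emit, size; struct request *requests; };

/* Space freed if the head advanced to 'to' (mirrors __intel_ring_space). */
static unsigned int ring_space(unsigned int to, unsigned int emit,
			       unsigned int size)
{
	return (to - emit - 8) & (size - 1);
}

/* Oldest request whose retirement leaves at least half the ring free. */
static struct request *throttle(const struct ring *ring)
{
	struct request *rq;

	for (rq = ring->requests; rq; rq = rq->next)
		if (ring_space(rq->postfix, ring->emit, ring->size) >
		    ring->size / 2)
			return rq; /* wait on this before submitting more */

	return NULL; /* nothing suitable; check again later for real */
}

int main(void)
{
	struct request r2 = { 3072, NULL }, r1 = { 1024, &r2 };
	struct ring ring = { .emit = 512, .size = 4096, .requests = &r1 };
	struct request *rq = throttle(&ring);

	printf("wait on postfix %u\n", rq ? rq->postfix : 0);
	return 0;
}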
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_fence.c b/drivers/gpu/drm/i915/gem/i915_gem_fence.c
index cf0439e6be83..2f6100ec2608 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_fence.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_fence.c
@@ -69,8 +69,7 @@ i915_gem_object_lock_fence(struct drm_i915_gem_object *obj)
i915_sw_fence_init(&stub->chain, stub_notify);
dma_fence_init(&stub->dma, &stub_fence_ops, &stub->chain.wait.lock,
- to_i915(obj->base.dev)->mm.unordered_timeline,
- 0);
+ 0, 0);
if (i915_sw_fence_await_reservation(&stub->chain,
obj->base.resv, NULL,
@@ -78,7 +77,7 @@ i915_gem_object_lock_fence(struct drm_i915_gem_object *obj)
I915_FENCE_GFP) < 0)
goto err;
- reservation_object_add_excl_fence(obj->base.resv, &stub->dma);
+ dma_resv_add_excl_fence(obj->base.resv, &stub->dma);
return &stub->dma;
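The stub fence signals only once every fence already on the reservation object has signalled. Loosely modelled in userspace, that is a countdown latch: i915_sw_fence keeps one self-reference that is dropped on commit. All names below are illustrative:

#include <stdio.h>

struct stub { int pending; int signaled; };

static void await(struct stub *s) { s->pending++; }
static void complete(struct stub *s)
{
	if (!--s->pending)
		s->signaled = 1; /* dma_fence_signal(&stub->dma) */
}

int main(void)
{
	struct stub s = { .pending = 1 }; /* self-reference until commit */

	await(&s); await(&s);	/* two fences already on the object */
	complete(&s); complete(&s);
	complete(&s);		/* commit: drop the self-reference */
	printf("signaled=%d\n", s.signaled);
	return 0;
}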
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
index 39a661927d8e..261c9bd83f51 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
@@ -7,12 +7,14 @@
#include <linux/mman.h>
#include <linux/sizes.h>
+#include "gt/intel_gt.h"
+
#include "i915_drv.h"
#include "i915_gem_gtt.h"
#include "i915_gem_ioctls.h"
#include "i915_gem_object.h"
+#include "i915_trace.h"
#include "i915_vma.h"
-#include "intel_drv.h"
static inline bool
__vma_matches(struct vm_area_struct *vma, struct file *filp,
@@ -99,9 +101,6 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
up_write(&mm->mmap_sem);
if (IS_ERR_VALUE(addr))
goto err;
-
- /* This may race, but that's ok, it only gets set */
- WRITE_ONCE(obj->frontbuffer_ggtt_origin, ORIGIN_CPU);
}
i915_gem_object_put(obj);
@@ -246,7 +245,7 @@ vm_fault_t i915_gem_fault(struct vm_fault *vmf)
wakeref = intel_runtime_pm_get(rpm);
- srcu = i915_reset_trylock(i915);
+ srcu = intel_gt_reset_trylock(ggtt->vm.gt);
if (srcu < 0) {
ret = srcu;
goto err_rpm;
@@ -265,15 +264,15 @@ vm_fault_t i915_gem_fault(struct vm_fault *vmf)
/* Now pin it into the GTT as needed */
vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
PIN_MAPPABLE |
- PIN_NONBLOCK |
- PIN_NONFAULT);
+ PIN_NONBLOCK /* NOWARN */ |
+ PIN_NOEVICT);
if (IS_ERR(vma)) {
/* Use a partial view if it is bigger than available space */
struct i915_ggtt_view view =
compute_partial_view(obj, page_offset, MIN_CHUNK_PAGES);
unsigned int flags;
- flags = PIN_MAPPABLE;
+ flags = PIN_MAPPABLE | PIN_NOSEARCH;
if (view.type == I915_GGTT_VIEW_NORMAL)
flags |= PIN_NONBLOCK; /* avoid warnings for pinned */
@@ -281,10 +280,9 @@ vm_fault_t i915_gem_fault(struct vm_fault *vmf)
* Userspace is now writing through an untracked VMA, abandon
* all hope that the hardware is able to track future writes.
*/
- obj->frontbuffer_ggtt_origin = ORIGIN_CPU;
vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, flags);
- if (IS_ERR(vma) && !view.type) {
+ if (IS_ERR(vma)) {
flags = PIN_MAPPABLE;
view.type = I915_GGTT_VIEW_PARTIAL;
vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, flags);
@@ -308,14 +306,17 @@ vm_fault_t i915_gem_fault(struct vm_fault *vmf)
if (ret)
goto err_fence;
- /* Mark as being mmapped into userspace for later revocation */
assert_rpm_wakelock_held(rpm);
+
+ /* Mark as being mmapped into userspace for later revocation */
+ mutex_lock(&i915->ggtt.vm.mutex);
if (!i915_vma_set_userfault(vma) && !obj->userfault_count++)
list_add(&obj->userfault_link, &i915->ggtt.userfault_list);
+ mutex_unlock(&i915->ggtt.vm.mutex);
+
if (CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND)
intel_wakeref_auto(&i915->ggtt.userfault_wakeref,
msecs_to_jiffies_timeout(CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND));
- GEM_BUG_ON(!obj->userfault_count);
i915_vma_set_ggtt_write(vma);
@@ -326,7 +327,7 @@ err_unpin:
err_unlock:
mutex_unlock(&dev->struct_mutex);
err_reset:
- i915_reset_unlock(i915, srcu);
+ intel_gt_reset_unlock(ggtt->vm.gt, srcu);
err_rpm:
intel_runtime_pm_put(rpm, wakeref);
i915_gem_object_unpin_pages(obj);
@@ -339,7 +340,7 @@ err:
* fail). But any other -EIO isn't ours (e.g. swap in failure)
* and so needs to be reported.
*/
- if (!i915_terminally_wedged(i915))
+ if (!intel_gt_is_wedged(ggtt->vm.gt))
return VM_FAULT_SIGBUS;
/* else, fall through */
case -EAGAIN:
@@ -410,8 +411,8 @@ void i915_gem_object_release_mmap(struct drm_i915_gem_object *obj)
* requirement that operations to the GGTT be made holding the RPM
* wakeref.
*/
- lockdep_assert_held(&i915->drm.struct_mutex);
wakeref = intel_runtime_pm_get(&i915->runtime_pm);
+ mutex_lock(&i915->ggtt.vm.mutex);
if (!obj->userfault_count)
goto out;
@@ -428,6 +429,7 @@ void i915_gem_object_release_mmap(struct drm_i915_gem_object *obj)
wmb();
out:
+ mutex_unlock(&i915->ggtt.vm.mutex);
intel_runtime_pm_put(&i915->runtime_pm, wakeref);
}
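Both hunks above replace struct_mutex with ggtt.vm.mutex for the userfault bookkeeping. A minimal userspace model of the first-fault accounting follows, with illustrative names and a pthread mutex standing in for the vm mutex:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct object { int userfault_count; bool on_revoke_list; };
struct vma { struct object *obj; bool userfault; };

static pthread_mutex_t vm_mutex = PTHREAD_MUTEX_INITIALIZER;

static void mark_userfault(struct vma *vma)
{
	pthread_mutex_lock(&vm_mutex);
	/* First fault through this vma and first for the object:
	 * queue the object for later mmap revocation. */
	if (!vma->userfault && !vma->obj->userfault_count++)
		vma->obj->on_revoke_list = true;
	vma->userfault = true;
	pthread_mutex_unlock(&vm_mutex);
}

int main(void)
{
	struct object obj = { 0, false };
	struct vma vma = { &obj, false };

	mark_userfault(&vma);
	mark_userfault(&vma); /* repeat fault: no double count */
	printf("count=%d queued=%d\n", obj.userfault_count,
	       obj.on_revoke_list);
	return 0;
}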
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c
index be6caccce0c5..d7855dc5a5c5 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c
@@ -23,12 +23,13 @@
*/
#include "display/intel_frontbuffer.h"
-
+#include "gt/intel_gt.h"
#include "i915_drv.h"
#include "i915_gem_clflush.h"
#include "i915_gem_context.h"
#include "i915_gem_object.h"
#include "i915_globals.h"
+#include "i915_trace.h"
static struct i915_global_object {
struct i915_global base;
@@ -45,16 +46,6 @@ void i915_gem_object_free(struct drm_i915_gem_object *obj)
return kmem_cache_free(global.slab_objects, obj);
}
-static void
-frontbuffer_retire(struct i915_active_request *active,
- struct i915_request *request)
-{
- struct drm_i915_gem_object *obj =
- container_of(active, typeof(*obj), frontbuffer_write);
-
- intel_fb_obj_flush(obj, ORIGIN_CS);
-}
-
void i915_gem_object_init(struct drm_i915_gem_object *obj,
const struct drm_i915_gem_object_ops *ops)
{
@@ -63,17 +54,14 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
spin_lock_init(&obj->vma.lock);
INIT_LIST_HEAD(&obj->vma.list);
+ INIT_LIST_HEAD(&obj->mm.link);
+
INIT_LIST_HEAD(&obj->lut_list);
- INIT_LIST_HEAD(&obj->batch_pool_link);
init_rcu_head(&obj->rcu);
obj->ops = ops;
- obj->frontbuffer_ggtt_origin = ORIGIN_GTT;
- i915_active_request_init(&obj->frontbuffer_write,
- NULL, frontbuffer_retire);
-
obj->mm.madv = I915_MADV_WILLNEED;
INIT_RADIX_TREE(&obj->mm.get_page.radix, GFP_KERNEL | __GFP_NOWARN);
mutex_init(&obj->mm.get_page.lock);
@@ -146,6 +134,19 @@ void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file)
}
}
+static void __i915_gem_free_object_rcu(struct rcu_head *head)
+{
+ struct drm_i915_gem_object *obj =
+ container_of(head, typeof(*obj), rcu);
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
+
+ dma_resv_fini(&obj->base._resv);
+ i915_gem_object_free(obj);
+
+ GEM_BUG_ON(!atomic_read(&i915->mm.free_count));
+ atomic_dec(&i915->mm.free_count);
+}
+
static void __i915_gem_free_objects(struct drm_i915_private *i915,
struct llist_node *freed)
{
@@ -160,7 +161,6 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
mutex_lock(&i915->drm.struct_mutex);
- GEM_BUG_ON(i915_gem_object_is_active(obj));
list_for_each_entry_safe(vma, vn, &obj->vma.list, obj_link) {
GEM_BUG_ON(i915_vma_is_active(vma));
vma->flags &= ~I915_VMA_PIN_MASK;
@@ -169,110 +169,70 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
GEM_BUG_ON(!list_empty(&obj->vma.list));
GEM_BUG_ON(!RB_EMPTY_ROOT(&obj->vma.tree));
- /*
- * This serializes freeing with the shrinker. Since the free
- * is delayed, first by RCU then by the workqueue, we want the
- * shrinker to be able to free pages of unreferenced objects,
- * or else we may oom whilst there are plenty of deferred
- * freed objects.
- */
- if (i915_gem_object_has_pages(obj) &&
- i915_gem_object_is_shrinkable(obj)) {
- unsigned long flags;
-
- spin_lock_irqsave(&i915->mm.obj_lock, flags);
- list_del_init(&obj->mm.link);
- spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
- }
-
mutex_unlock(&i915->drm.struct_mutex);
GEM_BUG_ON(atomic_read(&obj->bind_count));
GEM_BUG_ON(obj->userfault_count);
- GEM_BUG_ON(atomic_read(&obj->frontbuffer_bits));
GEM_BUG_ON(!list_empty(&obj->lut_list));
- if (obj->ops->release)
- obj->ops->release(obj);
-
atomic_set(&obj->mm.pages_pin_count, 0);
__i915_gem_object_put_pages(obj, I915_MM_NORMAL);
GEM_BUG_ON(i915_gem_object_has_pages(obj));
+ bitmap_free(obj->bit_17);
if (obj->base.import_attach)
drm_prime_gem_destroy(&obj->base, NULL);
- drm_gem_object_release(&obj->base);
+ drm_gem_free_mmap_offset(&obj->base);
- bitmap_free(obj->bit_17);
- i915_gem_object_free(obj);
-
- GEM_BUG_ON(!atomic_read(&i915->mm.free_count));
- atomic_dec(&i915->mm.free_count);
+ if (obj->ops->release)
+ obj->ops->release(obj);
- cond_resched();
+ /* But keep the pointer alive for RCU-protected lookups */
+ call_rcu(&obj->rcu, __i915_gem_free_object_rcu);
}
intel_runtime_pm_put(&i915->runtime_pm, wakeref);
}
void i915_gem_flush_free_objects(struct drm_i915_private *i915)
{
- struct llist_node *freed;
-
- /* Free the oldest, most stale object to keep the free_list short */
- freed = NULL;
- if (!llist_empty(&i915->mm.free_list)) { /* quick test for hotpath */
- /* Only one consumer of llist_del_first() allowed */
- spin_lock(&i915->mm.free_lock);
- freed = llist_del_first(&i915->mm.free_list);
- spin_unlock(&i915->mm.free_lock);
- }
- if (unlikely(freed)) {
- freed->next = NULL;
+ struct llist_node *freed = llist_del_all(&i915->mm.free_list);
+
+ if (unlikely(freed))
__i915_gem_free_objects(i915, freed);
- }
}
static void __i915_gem_free_work(struct work_struct *work)
{
struct drm_i915_private *i915 =
container_of(work, struct drm_i915_private, mm.free_work);
- struct llist_node *freed;
- /*
- * All file-owned VMA should have been released by this point through
- * i915_gem_close_object(), or earlier by i915_gem_context_close().
- * However, the object may also be bound into the global GTT (e.g.
- * older GPUs without per-process support, or for direct access through
- * the GTT either for the user or for scanout). Those VMA still need to
- * unbound now.
- */
-
- spin_lock(&i915->mm.free_lock);
- while ((freed = llist_del_all(&i915->mm.free_list))) {
- spin_unlock(&i915->mm.free_lock);
-
- __i915_gem_free_objects(i915, freed);
- if (need_resched())
- return;
-
- spin_lock(&i915->mm.free_lock);
- }
- spin_unlock(&i915->mm.free_lock);
+ i915_gem_flush_free_objects(i915);
}
-static void __i915_gem_free_object_rcu(struct rcu_head *head)
+void i915_gem_free_object(struct drm_gem_object *gem_obj)
{
- struct drm_i915_gem_object *obj =
- container_of(head, typeof(*obj), rcu);
+ struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
struct drm_i915_private *i915 = to_i915(obj->base.dev);
+ GEM_BUG_ON(i915_gem_object_is_framebuffer(obj));
+
+ /*
+ * Before we free the object, make sure any pure RCU-only
+ * read-side critical sections are complete, e.g.
+ * i915_gem_busy_ioctl(). For the corresponding synchronized
+ * lookup see i915_gem_object_lookup_rcu().
+ */
+ atomic_inc(&i915->mm.free_count);
+
/*
- * We reuse obj->rcu for the freed list, so we had better not treat
- * it like a rcu_head from this point forwards. And we expect all
- * objects to be freed via this path.
+ * This serializes freeing with the shrinker. Since the free
+ * is delayed, first by RCU then by the workqueue, we want the
+ * shrinker to be able to free pages of unreferenced objects,
+ * or else we may oom whilst there are plenty of deferred
+ * freed objects.
*/
- destroy_rcu_head(&obj->rcu);
+ i915_gem_object_make_unshrinkable(obj);
/*
* Since we require blocking on struct_mutex to unbind the freed
@@ -288,27 +248,6 @@ static void __i915_gem_free_object_rcu(struct rcu_head *head)
queue_work(i915->wq, &i915->mm.free_work);
}
-void i915_gem_free_object(struct drm_gem_object *gem_obj)
-{
- struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
-
- /*
- * Before we free the object, make sure any pure RCU-only
- * read-side critical sections are complete, e.g.
- * i915_gem_busy_ioctl(). For the corresponding synchronized
- * lookup see i915_gem_object_lookup_rcu().
- */
- atomic_inc(&to_i915(obj->base.dev)->mm.free_count);
- call_rcu(&obj->rcu, __i915_gem_free_object_rcu);
-}
-
-static inline enum fb_op_origin
-fb_write_origin(struct drm_i915_gem_object *obj, unsigned int domain)
-{
- return (domain == I915_GEM_DOMAIN_GTT ?
- obj->frontbuffer_ggtt_origin : ORIGIN_CPU);
-}
-
static bool gpu_write_needs_clflush(struct drm_i915_gem_object *obj)
{
return !(obj->cache_level == I915_CACHE_NONE ||
@@ -319,7 +258,6 @@ void
i915_gem_object_flush_write_domain(struct drm_i915_gem_object *obj,
unsigned int flush_domains)
{
- struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
struct i915_vma *vma;
assert_object_held(obj);
@@ -329,10 +267,10 @@ i915_gem_object_flush_write_domain(struct drm_i915_gem_object *obj,
switch (obj->write_domain) {
case I915_GEM_DOMAIN_GTT:
- i915_gem_flush_ggtt_writes(dev_priv);
+ for_each_ggtt_vma(vma, obj)
+ intel_gt_flush_ggtt_writes(vma->vm->gt);
- intel_fb_obj_flush(obj,
- fb_write_origin(obj, I915_GEM_DOMAIN_GTT));
+ intel_frontbuffer_flush(obj->frontbuffer, ORIGIN_CPU);
for_each_ggtt_vma(vma, obj) {
if (vma->iomap)
@@ -340,6 +278,7 @@ i915_gem_object_flush_write_domain(struct drm_i915_gem_object *obj,
i915_vma_unset_ggtt_write(vma);
}
+
break;
case I915_GEM_DOMAIN_WC:
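With the free worker reduced to i915_gem_flush_free_objects(), the whole free_list is detached in a single llist_del_all() and the free_lock is no longer needed. A standalone model of that lock-free producer/consumer pattern, with C11 atomics standing in for llist:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct node { struct node *next; int id; };
static _Atomic(struct node *) free_list;

static void push(struct node *n) /* llist_add() */
{
	n->next = atomic_load(&free_list);
	while (!atomic_compare_exchange_weak(&free_list, &n->next, n))
		;
}

static struct node *del_all(void) /* llist_del_all() */
{
	return atomic_exchange(&free_list, NULL);
}

int main(void)
{
	for (int i = 0; i < 3; i++) {
		struct node *n = malloc(sizeof(*n));
		n->id = i;
		push(n);
	}
	for (struct node *n = del_all(); n;) {
		struct node *next = n->next;
		printf("freeing object %d\n", n->id);
		free(n);
		n = next;
	}
	return 0;
}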
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h
index dfebd5706f16..5efb9936e05b 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h
@@ -81,7 +81,7 @@ i915_gem_object_lookup(struct drm_file *file, u32 handle)
}
__deprecated
-extern struct drm_gem_object *
+struct drm_gem_object *
drm_gem_object_lookup(struct drm_file *file, u32 handle);
__attribute__((nonnull))
@@ -99,22 +99,22 @@ i915_gem_object_put(struct drm_i915_gem_object *obj)
__drm_gem_object_put(&obj->base);
}
-#define assert_object_held(obj) reservation_object_assert_held((obj)->base.resv)
+#define assert_object_held(obj) dma_resv_assert_held((obj)->base.resv)
static inline void i915_gem_object_lock(struct drm_i915_gem_object *obj)
{
- reservation_object_lock(obj->base.resv, NULL);
+ dma_resv_lock(obj->base.resv, NULL);
}
static inline int
i915_gem_object_lock_interruptible(struct drm_i915_gem_object *obj)
{
- return reservation_object_lock_interruptible(obj->base.resv, NULL);
+ return dma_resv_lock_interruptible(obj->base.resv, NULL);
}
static inline void i915_gem_object_unlock(struct drm_i915_gem_object *obj)
{
- reservation_object_unlock(obj->base.resv);
+ dma_resv_unlock(obj->base.resv);
}
struct dma_fence *
@@ -159,15 +159,9 @@ i915_gem_object_needs_async_cancel(const struct drm_i915_gem_object *obj)
}
static inline bool
-i915_gem_object_is_active(const struct drm_i915_gem_object *obj)
-{
- return READ_ONCE(obj->active_count);
-}
-
-static inline bool
i915_gem_object_is_framebuffer(const struct drm_i915_gem_object *obj)
{
- return READ_ONCE(obj->framebuffer_references);
+ return READ_ONCE(obj->frontbuffer);
}
static inline unsigned int
@@ -373,7 +367,7 @@ i915_gem_object_last_write_engine(struct drm_i915_gem_object *obj)
struct dma_fence *fence;
rcu_read_lock();
- fence = reservation_object_get_excl_rcu(obj->base.resv);
+ fence = dma_resv_get_excl_rcu(obj->base.resv);
rcu_read_unlock();
if (fence && dma_fence_is_i915(fence) && !dma_fence_is_signaled(fence))
@@ -400,6 +394,10 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
unsigned int flags);
void i915_gem_object_unpin_from_display_plane(struct i915_vma *vma);
+void i915_gem_object_make_unshrinkable(struct drm_i915_gem_object *obj);
+void i915_gem_object_make_shrinkable(struct drm_i915_gem_object *obj);
+void i915_gem_object_make_purgeable(struct drm_i915_gem_object *obj);
+
static inline bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
{
if (obj->cache_dirty)
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_blt.c b/drivers/gpu/drm/i915/gem/i915_gem_object_blt.c
index cb42e3a312e2..6415f9a17e2d 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_blt.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_blt.c
@@ -3,59 +3,136 @@
* Copyright © 2019 Intel Corporation
*/
-#include "i915_gem_object_blt.h"
-
+#include "i915_drv.h"
+#include "gt/intel_context.h"
+#include "gt/intel_engine_pm.h"
+#include "gt/intel_engine_pool.h"
+#include "gt/intel_gt.h"
#include "i915_gem_clflush.h"
-#include "intel_drv.h"
+#include "i915_gem_object_blt.h"
-int intel_emit_vma_fill_blt(struct i915_request *rq,
- struct i915_vma *vma,
- u32 value)
+struct i915_vma *intel_emit_vma_fill_blt(struct intel_context *ce,
+ struct i915_vma *vma,
+ u32 value)
{
- u32 *cs;
-
- cs = intel_ring_begin(rq, 8);
- if (IS_ERR(cs))
- return PTR_ERR(cs);
-
- if (INTEL_GEN(rq->i915) >= 8) {
- *cs++ = XY_COLOR_BLT_CMD | BLT_WRITE_RGBA | (7 - 2);
- *cs++ = BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | PAGE_SIZE;
- *cs++ = 0;
- *cs++ = vma->size >> PAGE_SHIFT << 16 | PAGE_SIZE / 4;
- *cs++ = lower_32_bits(vma->node.start);
- *cs++ = upper_32_bits(vma->node.start);
- *cs++ = value;
- *cs++ = MI_NOOP;
- } else {
- *cs++ = XY_COLOR_BLT_CMD | BLT_WRITE_RGBA | (6 - 2);
- *cs++ = BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | PAGE_SIZE;
- *cs++ = 0;
- *cs++ = vma->size >> PAGE_SHIFT << 16 | PAGE_SIZE / 4;
- *cs++ = vma->node.start;
- *cs++ = value;
- *cs++ = MI_NOOP;
- *cs++ = MI_NOOP;
+ struct drm_i915_private *i915 = ce->vm->i915;
+ const u32 block_size = S16_MAX * PAGE_SIZE;
+ struct intel_engine_pool_node *pool;
+ struct i915_vma *batch;
+ u64 offset;
+ u64 count;
+ u64 rem;
+ u32 size;
+ u32 *cmd;
+ int err;
+
+ GEM_BUG_ON(intel_engine_is_virtual(ce->engine));
+ intel_engine_pm_get(ce->engine);
+
+ count = div_u64(vma->size, block_size);
+ size = (1 + 8 * count) * sizeof(u32);
+ size = round_up(size, PAGE_SIZE);
+ pool = intel_engine_pool_get(&ce->engine->pool, size);
+ if (IS_ERR(pool)) {
+ err = PTR_ERR(pool);
+ goto out_pm;
+ }
+
+ cmd = i915_gem_object_pin_map(pool->obj, I915_MAP_WC);
+ if (IS_ERR(cmd)) {
+ err = PTR_ERR(cmd);
+ goto out_put;
+ }
+
+ rem = vma->size;
+ offset = vma->node.start;
+
+ do {
+ u32 size = min_t(u64, rem, block_size);
+
+ GEM_BUG_ON(size >> PAGE_SHIFT > S16_MAX);
+
+ if (INTEL_GEN(i915) >= 8) {
+ *cmd++ = XY_COLOR_BLT_CMD | BLT_WRITE_RGBA | (7 - 2);
+ *cmd++ = BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | PAGE_SIZE;
+ *cmd++ = 0;
+ *cmd++ = size >> PAGE_SHIFT << 16 | PAGE_SIZE / 4;
+ *cmd++ = lower_32_bits(offset);
+ *cmd++ = upper_32_bits(offset);
+ *cmd++ = value;
+ } else {
+ *cmd++ = XY_COLOR_BLT_CMD | BLT_WRITE_RGBA | (6 - 2);
+ *cmd++ = BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | PAGE_SIZE;
+ *cmd++ = 0;
+ *cmd++ = size >> PAGE_SHIFT << 16 | PAGE_SIZE / 4;
+ *cmd++ = offset;
+ *cmd++ = value;
+ }
+
+ /* Allow ourselves to be preempted in between blocks. */
+ *cmd++ = MI_ARB_CHECK;
+
+ offset += size;
+ rem -= size;
+ } while (rem);
+
+ *cmd = MI_BATCH_BUFFER_END;
+ intel_gt_chipset_flush(ce->vm->gt);
+
+ i915_gem_object_unpin_map(pool->obj);
+
+ batch = i915_vma_instance(pool->obj, ce->vm, NULL);
+ if (IS_ERR(batch)) {
+ err = PTR_ERR(batch);
+ goto out_put;
}
- intel_ring_advance(rq, cs);
+ err = i915_vma_pin(batch, 0, 0, PIN_USER);
+ if (unlikely(err))
+ goto out_put;
+
+ batch->private = pool;
+ return batch;
- return 0;
+out_put:
+ intel_engine_pool_put(pool);
+out_pm:
+ intel_engine_pm_put(ce->engine);
+ return ERR_PTR(err);
+}
+
+int intel_emit_vma_mark_active(struct i915_vma *vma, struct i915_request *rq)
+{
+ int err;
+
+ i915_vma_lock(vma);
+ err = i915_request_await_object(rq, vma->obj, false);
+ if (err == 0)
+ err = i915_vma_move_to_active(vma, rq, 0);
+ i915_vma_unlock(vma);
+ if (unlikely(err))
+ return err;
+
+ return intel_engine_pool_mark_active(vma->private, rq);
+}
+
+void intel_emit_vma_release(struct intel_context *ce, struct i915_vma *vma)
+{
+ i915_vma_unpin(vma);
+ intel_engine_pool_put(vma->private);
+ intel_engine_pm_put(ce->engine);
}
int i915_gem_object_fill_blt(struct drm_i915_gem_object *obj,
struct intel_context *ce,
u32 value)
{
- struct drm_i915_private *i915 = to_i915(obj->base.dev);
- struct i915_gem_context *ctx = ce->gem_context;
- struct i915_address_space *vm = ctx->vm ?: &i915->ggtt.vm;
struct i915_request *rq;
+ struct i915_vma *batch;
struct i915_vma *vma;
int err;
- /* XXX: ce->vm please */
- vma = i915_vma_instance(obj, vm, NULL);
+ vma = i915_vma_instance(obj, ce->vm, NULL);
if (IS_ERR(vma))
return PTR_ERR(vma);
@@ -69,12 +146,22 @@ int i915_gem_object_fill_blt(struct drm_i915_gem_object *obj,
i915_gem_object_unlock(obj);
}
- rq = i915_request_create(ce);
+ batch = intel_emit_vma_fill_blt(ce, vma, value);
+ if (IS_ERR(batch)) {
+ err = PTR_ERR(batch);
+ goto out_unpin;
+ }
+
+ rq = intel_context_create_request(ce);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
- goto out_unpin;
+ goto out_batch;
}
+ err = intel_emit_vma_mark_active(batch, rq);
+ if (unlikely(err))
+ goto out_request;
+
err = i915_request_await_object(rq, obj, true);
if (unlikely(err))
goto out_request;
@@ -86,22 +173,229 @@ int i915_gem_object_fill_blt(struct drm_i915_gem_object *obj,
}
i915_vma_lock(vma);
- err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
+ err = i915_request_await_object(rq, vma->obj, true);
+ if (err == 0)
+ err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
i915_vma_unlock(vma);
if (unlikely(err))
goto out_request;
- err = intel_emit_vma_fill_blt(rq, vma, value);
+ err = ce->engine->emit_bb_start(rq,
+ batch->node.start, batch->node.size,
+ 0);
out_request:
if (unlikely(err))
i915_request_skip(rq, err);
i915_request_add(rq);
+out_batch:
+ intel_emit_vma_release(ce, batch);
out_unpin:
i915_vma_unpin(vma);
return err;
}
+struct i915_vma *intel_emit_vma_copy_blt(struct intel_context *ce,
+ struct i915_vma *src,
+ struct i915_vma *dst)
+{
+ struct drm_i915_private *i915 = ce->vm->i915;
+ const u32 block_size = S16_MAX * PAGE_SIZE;
+ struct intel_engine_pool_node *pool;
+ struct i915_vma *batch;
+ u64 src_offset, dst_offset;
+ u64 count, rem;
+ u32 size, *cmd;
+ int err;
+
+ GEM_BUG_ON(src->size != dst->size);
+
+ GEM_BUG_ON(intel_engine_is_virtual(ce->engine));
+ intel_engine_pm_get(ce->engine);
+
+ count = div_u64(dst->size, block_size);
+ size = (1 + 11 * count) * sizeof(u32);
+ size = round_up(size, PAGE_SIZE);
+ pool = intel_engine_pool_get(&ce->engine->pool, size);
+ if (IS_ERR(pool)) {
+ err = PTR_ERR(pool);
+ goto out_pm;
+ }
+
+ cmd = i915_gem_object_pin_map(pool->obj, I915_MAP_WC);
+ if (IS_ERR(cmd)) {
+ err = PTR_ERR(cmd);
+ goto out_put;
+ }
+
+ rem = src->size;
+ src_offset = src->node.start;
+ dst_offset = dst->node.start;
+
+ do {
+ size = min_t(u64, rem, block_size);
+ GEM_BUG_ON(size >> PAGE_SHIFT > S16_MAX);
+
+ if (INTEL_GEN(i915) >= 9) {
+ *cmd++ = GEN9_XY_FAST_COPY_BLT_CMD | (10 - 2);
+ *cmd++ = BLT_DEPTH_32 | PAGE_SIZE;
+ *cmd++ = 0;
+ *cmd++ = size >> PAGE_SHIFT << 16 | PAGE_SIZE / 4;
+ *cmd++ = lower_32_bits(dst_offset);
+ *cmd++ = upper_32_bits(dst_offset);
+ *cmd++ = 0;
+ *cmd++ = PAGE_SIZE;
+ *cmd++ = lower_32_bits(src_offset);
+ *cmd++ = upper_32_bits(src_offset);
+ } else if (INTEL_GEN(i915) >= 8) {
+ *cmd++ = XY_SRC_COPY_BLT_CMD | BLT_WRITE_RGBA | (10 - 2);
+ *cmd++ = BLT_DEPTH_32 | BLT_ROP_SRC_COPY | PAGE_SIZE;
+ *cmd++ = 0;
+ *cmd++ = size >> PAGE_SHIFT << 16 | PAGE_SIZE / 4;
+ *cmd++ = lower_32_bits(dst_offset);
+ *cmd++ = upper_32_bits(dst_offset);
+ *cmd++ = 0;
+ *cmd++ = PAGE_SIZE;
+ *cmd++ = lower_32_bits(src_offset);
+ *cmd++ = upper_32_bits(src_offset);
+ } else {
+ *cmd++ = SRC_COPY_BLT_CMD | BLT_WRITE_RGBA | (6 - 2);
+ *cmd++ = BLT_DEPTH_32 | BLT_ROP_SRC_COPY | PAGE_SIZE;
+ *cmd++ = size >> PAGE_SHIFT << 16 | PAGE_SIZE;
+ *cmd++ = dst_offset;
+ *cmd++ = PAGE_SIZE;
+ *cmd++ = src_offset;
+ }
+
+ /* Allow ourselves to be preempted in between blocks. */
+ *cmd++ = MI_ARB_CHECK;
+
+ src_offset += size;
+ dst_offset += size;
+ rem -= size;
+ } while (rem);
+
+ *cmd = MI_BATCH_BUFFER_END;
+ intel_gt_chipset_flush(ce->vm->gt);
+
+ i915_gem_object_unpin_map(pool->obj);
+
+ batch = i915_vma_instance(pool->obj, ce->vm, NULL);
+ if (IS_ERR(batch)) {
+ err = PTR_ERR(batch);
+ goto out_put;
+ }
+
+ err = i915_vma_pin(batch, 0, 0, PIN_USER);
+ if (unlikely(err))
+ goto out_put;
+
+ batch->private = pool;
+ return batch;
+
+out_put:
+ intel_engine_pool_put(pool);
+out_pm:
+ intel_engine_pm_put(ce->engine);
+ return ERR_PTR(err);
+}
+
+static int move_to_gpu(struct i915_vma *vma, struct i915_request *rq, bool write)
+{
+ struct drm_i915_gem_object *obj = vma->obj;
+
+ if (obj->cache_dirty & ~obj->cache_coherent)
+ i915_gem_clflush_object(obj, 0);
+
+ return i915_request_await_object(rq, obj, write);
+}
+
+int i915_gem_object_copy_blt(struct drm_i915_gem_object *src,
+ struct drm_i915_gem_object *dst,
+ struct intel_context *ce)
+{
+ struct drm_gem_object *objs[] = { &src->base, &dst->base };
+ struct i915_address_space *vm = ce->vm;
+ struct i915_vma *vma[2], *batch;
+ struct ww_acquire_ctx acquire;
+ struct i915_request *rq;
+ int err, i;
+
+ vma[0] = i915_vma_instance(src, vm, NULL);
+ if (IS_ERR(vma[0]))
+ return PTR_ERR(vma[0]);
+
+ err = i915_vma_pin(vma[0], 0, 0, PIN_USER);
+ if (unlikely(err))
+ return err;
+
+ vma[1] = i915_vma_instance(dst, vm, NULL);
+ if (IS_ERR(vma[1])) {
+ err = PTR_ERR(vma[1]);
+ goto out_unpin_src;
+ }
+
+ err = i915_vma_pin(vma[1], 0, 0, PIN_USER);
+ if (unlikely(err))
+ goto out_unpin_src;
+
+ batch = intel_emit_vma_copy_blt(ce, vma[0], vma[1]);
+ if (IS_ERR(batch)) {
+ err = PTR_ERR(batch);
+ goto out_unpin_dst;
+ }
+
+ rq = intel_context_create_request(ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto out_batch;
+ }
+
+ err = intel_emit_vma_mark_active(batch, rq);
+ if (unlikely(err))
+ goto out_request;
+
+ err = drm_gem_lock_reservations(objs, ARRAY_SIZE(objs), &acquire);
+ if (unlikely(err))
+ goto out_request;
+
+ for (i = 0; i < ARRAY_SIZE(vma); i++) {
+ err = move_to_gpu(vma[i], rq, i);
+ if (unlikely(err))
+ goto out_unlock;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(vma); i++) {
+ unsigned int flags = i ? EXEC_OBJECT_WRITE : 0;
+
+ err = i915_vma_move_to_active(vma[i], rq, flags);
+ if (unlikely(err))
+ goto out_unlock;
+ }
+
+ if (rq->engine->emit_init_breadcrumb) {
+ err = rq->engine->emit_init_breadcrumb(rq);
+ if (unlikely(err))
+ goto out_unlock;
+ }
+
+ err = rq->engine->emit_bb_start(rq,
+ batch->node.start, batch->node.size,
+ 0);
+out_unlock:
+ drm_gem_unlock_reservations(objs, ARRAY_SIZE(objs), &acquire);
+out_request:
+ if (unlikely(err))
+ i915_request_skip(rq, err);
+
+ i915_request_add(rq);
+out_batch:
+ intel_emit_vma_release(ce, batch);
+out_unpin_dst:
+ i915_vma_unpin(vma[1]);
+out_unpin_src:
+ i915_vma_unpin(vma[0]);
+ return err;
+}
+
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/i915_gem_object_blt.c"
#endif
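Both emitters split the blit into S16_MAX-page blocks with an MI_ARB_CHECK after each so the engine can be preempted mid-operation. The standalone sketch below only prints the command stream it would build; the two constants mirror the code, everything else is a stand-in:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096ULL
#define S16_MAX 32767ULL

int main(void)
{
	uint64_t rem = 300ULL << 20; /* e.g. a 300 MiB object */
	uint64_t offset = 0, block = S16_MAX * PAGE_SIZE;

	do {
		uint64_t size = rem < block ? rem : block;

		printf("XY_COLOR_BLT offset=%#llx pages=%llu\n",
		       (unsigned long long)offset,
		       (unsigned long long)(size >> 12));
		printf("MI_ARB_CHECK\n"); /* preemption point between blocks */
		offset += size;
		rem -= size;
	} while (rem);
	printf("MI_BATCH_BUFFER_END\n");
	return 0;
}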
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_blt.h b/drivers/gpu/drm/i915/gem/i915_gem_object_blt.h
index 7ec7de6ac0c0..243a43a87824 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_blt.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_blt.h
@@ -8,17 +8,30 @@
#include <linux/types.h>
+#include "gt/intel_context.h"
+#include "gt/intel_engine_pm.h"
+#include "gt/intel_engine_pool.h"
+#include "i915_vma.h"
+
struct drm_i915_gem_object;
-struct intel_context;
-struct i915_request;
-struct i915_vma;
-int intel_emit_vma_fill_blt(struct i915_request *rq,
- struct i915_vma *vma,
- u32 value);
+struct i915_vma *intel_emit_vma_fill_blt(struct intel_context *ce,
+ struct i915_vma *vma,
+ u32 value);
+
+struct i915_vma *intel_emit_vma_copy_blt(struct intel_context *ce,
+ struct i915_vma *src,
+ struct i915_vma *dst);
+
+int intel_emit_vma_mark_active(struct i915_vma *vma, struct i915_request *rq);
+void intel_emit_vma_release(struct intel_context *ce, struct i915_vma *vma);
int i915_gem_object_fill_blt(struct drm_i915_gem_object *obj,
struct intel_context *ce,
u32 value);
+int i915_gem_object_copy_blt(struct drm_i915_gem_object *src,
+ struct drm_i915_gem_object *dst,
+ struct intel_context *ce);
+
#endif
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
index 18bf4f8d6d80..ede0eb4218a8 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
@@ -13,6 +13,7 @@
#include "i915_selftest.h"
struct drm_i915_gem_object;
+struct intel_frontbuffer;
/*
* struct i915_lut_handle tracks the fast lookups from handle to vma used
@@ -114,7 +115,6 @@ struct drm_i915_gem_object {
unsigned int userfault_count;
struct list_head userfault_link;
- struct list_head batch_pool_link;
I915_SELFTEST_DECLARE(struct list_head st_link);
/*
@@ -142,9 +142,7 @@ struct drm_i915_gem_object {
*/
u16 write_domain;
- atomic_t frontbuffer_bits;
- unsigned int frontbuffer_ggtt_origin; /* write once */
- struct i915_active_request frontbuffer_write;
+ struct intel_frontbuffer *frontbuffer;
/** Current tiling stride for the object, if it's tiled. */
unsigned int tiling_and_stride;
@@ -154,7 +152,6 @@ struct drm_i915_gem_object {
/** Count of VMA actually bound by this object */
atomic_t bind_count;
- unsigned int active_count;
/** Count of how many global VMA are currently pinned for use by HW */
unsigned int pin_global;
@@ -226,9 +223,6 @@ struct drm_i915_gem_object {
bool quirked:1;
} mm;
- /** References from framebuffers, locks out tiling changes. */
- unsigned int framebuffer_references;
-
/** Record of address bit 17 of each page at last unbind. */
unsigned long *bit_17;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pages.c b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
index 65eb430cedba..18f0ce0135c1 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pages.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
@@ -153,24 +153,13 @@ static void __i915_gem_object_reset_page_iter(struct drm_i915_gem_object *obj)
struct sg_table *
__i915_gem_object_unset_pages(struct drm_i915_gem_object *obj)
{
- struct drm_i915_private *i915 = to_i915(obj->base.dev);
struct sg_table *pages;
pages = fetch_and_zero(&obj->mm.pages);
if (IS_ERR_OR_NULL(pages))
return pages;
- if (i915_gem_object_is_shrinkable(obj)) {
- unsigned long flags;
-
- spin_lock_irqsave(&i915->mm.obj_lock, flags);
-
- list_del(&obj->mm.link);
- i915->mm.shrink_count--;
- i915->mm.shrink_memory -= obj->base.size;
-
- spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
- }
+ i915_gem_object_make_unshrinkable(obj);
if (obj->mm.mapping) {
void *ptr;
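The page-table teardown relies on fetch_and_zero() to make the hand-off unambiguous: exactly one caller observes the non-NULL pages pointer. A userspace model of that idiom (GNU C statement expression; names are illustrative):

#include <stdio.h>
#include <stdlib.h>

#define fetch_and_zero(ptr) ({			\
	__typeof__(*(ptr)) __T = *(ptr);	\
	*(ptr) = (__typeof__(*(ptr)))0;		\
	__T;					\
})

struct object { int *pages; };

static int *unset_pages(struct object *obj)
{
	return fetch_and_zero(&obj->pages); /* second call returns NULL */
}

int main(void)
{
	struct object obj = { .pages = malloc(4 * sizeof(int)) };

	free(unset_pages(&obj));
	printf("second unset: %p\n", (void *)unset_pages(&obj));
	return 0;
}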
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_phys.c b/drivers/gpu/drm/i915/gem/i915_gem_phys.c
index 2deac933cf59..768356908160 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_phys.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_phys.c
@@ -13,6 +13,7 @@
#include <drm/drm_legacy.h> /* for drm_pci.h! */
#include <drm/drm_pci.h>
+#include "gt/intel_gt.h"
#include "i915_drv.h"
#include "i915_gem_object.h"
#include "i915_scatterlist.h"
@@ -60,7 +61,7 @@ static int i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
vaddr += PAGE_SIZE;
}
- i915_gem_chipset_flush(to_i915(obj->base.dev));
+ intel_gt_chipset_flush(&to_i915(obj->base.dev)->gt);
st = kmalloc(sizeof(*st), GFP_KERNEL);
if (!st) {
@@ -132,16 +133,16 @@ i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj,
drm_pci_free(obj->base.dev, obj->phys_handle);
}
-static void
-i915_gem_object_release_phys(struct drm_i915_gem_object *obj)
+static void phys_release(struct drm_i915_gem_object *obj)
{
- i915_gem_object_unpin_pages(obj);
+ fput(obj->base.filp);
}
static const struct drm_i915_gem_object_ops i915_gem_phys_ops = {
.get_pages = i915_gem_object_get_pages_phys,
.put_pages = i915_gem_object_put_pages_phys,
- .release = i915_gem_object_release_phys,
+
+ .release = phys_release,
};
int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align)
@@ -158,7 +159,7 @@ int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align)
if (obj->ops != &i915_gem_shmem_ops)
return -EINVAL;
- err = i915_gem_object_unbind(obj);
+ err = i915_gem_object_unbind(obj, I915_GEM_OBJECT_UNBIND_ACTIVE);
if (err)
return err;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pm.c b/drivers/gpu/drm/i915/gem/i915_gem_pm.c
index 914b5d4112bb..92e53c25424c 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pm.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pm.c
@@ -5,6 +5,7 @@
*/
#include "gem/i915_gem_pm.h"
+#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"
#include "i915_drv.h"
@@ -33,12 +34,9 @@ static void i915_gem_park(struct drm_i915_private *i915)
lockdep_assert_held(&i915->drm.struct_mutex);
- for_each_engine(engine, i915, id) {
+ for_each_engine(engine, i915, id)
call_idle_barriers(engine); /* cleanup after wedging */
- i915_gem_batch_pool_fini(&engine->batch_pool);
- }
- i915_timelines_park(i915);
i915_vma_parked(i915);
i915_globals_park();
@@ -54,7 +52,8 @@ static void idle_work_handler(struct work_struct *work)
mutex_lock(&i915->drm.struct_mutex);
intel_wakeref_lock(&i915->gt.wakeref);
- park = !intel_wakeref_active(&i915->gt.wakeref) && !work_pending(work);
+ park = (!intel_wakeref_is_active(&i915->gt.wakeref) &&
+ !work_pending(work));
intel_wakeref_unlock(&i915->gt.wakeref);
if (park)
i915_gem_park(i915);
@@ -105,18 +104,18 @@ static int pm_notifier(struct notifier_block *nb,
return NOTIFY_OK;
}
-static bool switch_to_kernel_context_sync(struct drm_i915_private *i915)
+static bool switch_to_kernel_context_sync(struct intel_gt *gt)
{
- bool result = !i915_terminally_wedged(i915);
+ bool result = !intel_gt_is_wedged(gt);
do {
- if (i915_gem_wait_for_idle(i915,
+ if (i915_gem_wait_for_idle(gt->i915,
I915_WAIT_LOCKED |
I915_WAIT_FOR_IDLE_BOOST,
I915_GEM_IDLE_TIMEOUT) == -ETIME) {
/* XXX hide warning from gem_eio */
if (i915_modparams.reset) {
- dev_err(i915->drm.dev,
+ dev_err(gt->i915->drm.dev,
"Failed to idle engines, declaring wedged!\n");
GEM_TRACE_DUMP();
}
@@ -125,18 +124,20 @@ static bool switch_to_kernel_context_sync(struct drm_i915_private *i915)
* Forcibly cancel outstanding work and leave
* the gpu quiet.
*/
- i915_gem_set_wedged(i915);
+ intel_gt_set_wedged(gt);
result = false;
}
- } while (i915_retire_requests(i915) && result);
+ } while (i915_retire_requests(gt->i915) && result);
+
+ if (intel_gt_pm_wait_for_idle(gt))
+ result = false;
- GEM_BUG_ON(i915->gt.awake);
return result;
}
bool i915_gem_load_power_context(struct drm_i915_private *i915)
{
- return switch_to_kernel_context_sync(i915);
+ return switch_to_kernel_context_sync(&i915->gt);
}
void i915_gem_suspend(struct drm_i915_private *i915)
@@ -157,22 +158,15 @@ void i915_gem_suspend(struct drm_i915_private *i915)
* state. Fortunately, the kernel_context is disposable and we do
* not rely on its state.
*/
- switch_to_kernel_context_sync(i915);
+ switch_to_kernel_context_sync(&i915->gt);
mutex_unlock(&i915->drm.struct_mutex);
- /*
- * Assert that we successfully flushed all the work and
- * reset the GPU back to its idle, low power state.
- */
- GEM_BUG_ON(i915->gt.awake);
- flush_work(&i915->gem.idle_work);
-
- cancel_delayed_work_sync(&i915->gpu_error.hangcheck_work);
+ cancel_delayed_work_sync(&i915->gt.hangcheck.work);
i915_gem_drain_freed_objects(i915);
- intel_uc_suspend(i915);
+ intel_uc_suspend(&i915->gt.uc);
}
static struct drm_i915_gem_object *first_mm_object(struct list_head *list)
@@ -237,7 +231,6 @@ void i915_gem_suspend_late(struct drm_i915_private *i915)
}
spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
- intel_uc_sanitize(i915);
i915_gem_sanitize(i915);
}
@@ -245,8 +238,6 @@ void i915_gem_resume(struct drm_i915_private *i915)
{
GEM_TRACE("\n");
- WARN_ON(i915->gt.awake);
-
mutex_lock(&i915->drm.struct_mutex);
intel_uncore_forcewake_get(&i915->uncore, FORCEWAKE_ALL);
@@ -261,10 +252,10 @@ void i915_gem_resume(struct drm_i915_private *i915)
* guarantee that the context image is complete. So let's just reset
* it and start again.
*/
- if (intel_gt_resume(i915))
+ if (intel_gt_resume(&i915->gt))
goto err_wedged;
- intel_uc_resume(i915);
+ intel_uc_resume(&i915->gt.uc);
/* Always reload a context for powersaving. */
if (!i915_gem_load_power_context(i915))
@@ -276,10 +267,10 @@ out_unlock:
return;
err_wedged:
- if (!i915_reset_failed(i915)) {
+ if (!intel_gt_is_wedged(&i915->gt)) {
dev_err(i915->drm.dev,
"Failed to re-initialize GPU, declaring it wedged!\n");
- i915_gem_set_wedged(i915);
+ intel_gt_set_wedged(&i915->gt);
}
goto out_unlock;
}
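The suspend path now loops in switch_to_kernel_context_sync(): wait for idle, wedge on failure, and retire until no requests remain. A toy model of that control flow, with every name a stand-in:

#include <stdbool.h>
#include <stdio.h>

static int pending = 3;

static bool wait_for_idle(void)   { return true; } /* pretend it worked */
static bool retire_requests(void) { return pending > 0 && pending--; }

int main(void)
{
	bool result = true;

	do {
		if (!wait_for_idle())
			result = false; /* would intel_gt_set_wedged() here */
	} while (retire_requests() && result);

	printf("reached idle: %s\n", result ? "yes" : "no");
	return 0;
}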
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
index 19d9ecdb2894..4c4954e8ce0a 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
@@ -10,6 +10,7 @@
#include "i915_drv.h"
#include "i915_gem_object.h"
#include "i915_scatterlist.h"
+#include "i915_trace.h"
/*
* Move pages to appropriate lru and release the pagevec, decrementing the
@@ -414,6 +415,11 @@ shmem_pwrite(struct drm_i915_gem_object *obj,
return 0;
}
+static void shmem_release(struct drm_i915_gem_object *obj)
+{
+ fput(obj->base.filp);
+}
+
const struct drm_i915_gem_object_ops i915_gem_shmem_ops = {
.flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE |
I915_GEM_OBJECT_IS_SHRINKABLE,
@@ -424,6 +430,8 @@ const struct drm_i915_gem_object_ops i915_gem_shmem_ops = {
.writeback = shmem_writeback,
.pwrite = shmem_pwrite,
+
+ .release = shmem_release,
};
static int create_shmem(struct drm_i915_private *i915,
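shmem and phys objects now release their backing filp through the new ops->release hook rather than open-coded paths. A minimal model of that per-backend release dispatch (illustrative types):

#include <stdio.h>

struct object;
struct object_ops { void (*release)(struct object *); };
struct object { const struct object_ops *ops; };

static void shmem_release(struct object *obj)
{
	(void)obj;
	printf("fput(obj->base.filp)\n");
}

static const struct object_ops shmem_ops = { .release = shmem_release };

static void free_object(struct object *obj)
{
	if (obj->ops->release) /* backend-specific cleanup */
		obj->ops->release(obj);
}

int main(void)
{
	struct object obj = { .ops = &shmem_ops };

	free_object(&obj);
	return 0;
}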
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
index 3a926a8755c6..edd21d14e64f 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
@@ -88,10 +88,18 @@ static bool can_release_pages(struct drm_i915_gem_object *obj)
return swap_available() || obj->mm.madv == I915_MADV_DONTNEED;
}
-static bool unsafe_drop_pages(struct drm_i915_gem_object *obj)
+static bool unsafe_drop_pages(struct drm_i915_gem_object *obj,
+ unsigned long shrink)
{
- if (i915_gem_object_unbind(obj) == 0)
+ unsigned long flags;
+
+ flags = 0;
+ if (shrink & I915_SHRINK_ACTIVE)
+ flags = I915_GEM_OBJECT_UNBIND_ACTIVE;
+
+ if (i915_gem_object_unbind(obj, flags) == 0)
__i915_gem_object_put_pages(obj, I915_MM_SHRINKER);
+
return !i915_gem_object_has_pages(obj);
}
@@ -169,7 +177,6 @@ i915_gem_shrink(struct drm_i915_private *i915,
*/
trace_i915_gem_shrink(i915, target, shrink);
- i915_retire_requests(i915);
/*
* Unbinding of objects will require HW access; Let us not wake the
@@ -230,8 +237,7 @@ i915_gem_shrink(struct drm_i915_private *i915,
continue;
if (!(shrink & I915_SHRINK_ACTIVE) &&
- (i915_gem_object_is_active(obj) ||
- i915_gem_object_is_framebuffer(obj)))
+ i915_gem_object_is_framebuffer(obj))
continue;
if (!(shrink & I915_SHRINK_BOUND) &&
@@ -246,7 +252,7 @@ i915_gem_shrink(struct drm_i915_private *i915,
spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
- if (unsafe_drop_pages(obj)) {
+ if (unsafe_drop_pages(obj, shrink)) {
/* May arrive from get_pages on another bo */
mutex_lock_nested(&obj->mm.lock,
I915_MM_SHRINKER);
@@ -269,8 +275,6 @@ i915_gem_shrink(struct drm_i915_private *i915,
if (shrink & I915_SHRINK_BOUND)
intel_runtime_pm_put(&i915->runtime_pm, wakeref);
- i915_retire_requests(i915);
-
shrinker_unlock(i915, unlock);
if (nr_scanned)
@@ -427,12 +431,6 @@ i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr
if (!shrinker_lock(i915, 0, &unlock))
return NOTIFY_DONE;
- /* Force everything onto the inactive lists */
- if (i915_gem_wait_for_idle(i915,
- I915_WAIT_LOCKED,
- MAX_SCHEDULE_TIMEOUT))
- goto out;
-
with_intel_runtime_pm(&i915->runtime_pm, wakeref)
freed_pages += i915_gem_shrink(i915, -1UL, NULL,
I915_SHRINK_BOUND |
@@ -455,20 +453,13 @@ i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr
}
mutex_unlock(&i915->ggtt.vm.mutex);
-out:
shrinker_unlock(i915, unlock);
*(unsigned long *)ptr += freed_pages;
return NOTIFY_DONE;
}
-/**
- * i915_gem_shrinker_register - Register the i915 shrinker
- * @i915: i915 device
- *
- * This function registers and sets up the i915 shrinker and OOM handler.
- */
-void i915_gem_shrinker_register(struct drm_i915_private *i915)
+void i915_gem_driver_register__shrinker(struct drm_i915_private *i915)
{
i915->mm.shrinker.scan_objects = i915_gem_shrinker_scan;
i915->mm.shrinker.count_objects = i915_gem_shrinker_count;
@@ -483,13 +474,7 @@ void i915_gem_shrinker_register(struct drm_i915_private *i915)
WARN_ON(register_vmap_purge_notifier(&i915->mm.vmap_notifier));
}
-/**
- * i915_gem_shrinker_unregister - Unregisters the i915 shrinker
- * @i915: i915 device
- *
- * This function unregisters the i915 shrinker and OOM handler.
- */
-void i915_gem_shrinker_unregister(struct drm_i915_private *i915)
+void i915_gem_driver_unregister__shrinker(struct drm_i915_private *i915)
{
WARN_ON(unregister_vmap_purge_notifier(&i915->mm.vmap_notifier));
WARN_ON(unregister_oom_notifier(&i915->mm.oom_notifier));
@@ -533,3 +518,61 @@ void i915_gem_shrinker_taints_mutex(struct drm_i915_private *i915,
if (unlock)
mutex_release(&i915->drm.struct_mutex.dep_map, 0, _RET_IP_);
}
+
+#define obj_to_i915(obj__) to_i915((obj__)->base.dev)
+
+void i915_gem_object_make_unshrinkable(struct drm_i915_gem_object *obj)
+{
+ /*
+ * We can only be called while the pages are pinned or when
+ * the pages are released. If pinned, we should only be called
+ * from a single caller under controlled conditions, and on release
+ * only one caller may release us. The two paths must never overlap.
+ */
+ if (!list_empty(&obj->mm.link)) { /* pinned by caller */
+ struct drm_i915_private *i915 = obj_to_i915(obj);
+ unsigned long flags;
+
+ spin_lock_irqsave(&i915->mm.obj_lock, flags);
+ GEM_BUG_ON(list_empty(&obj->mm.link));
+
+ list_del_init(&obj->mm.link);
+ i915->mm.shrink_count--;
+ i915->mm.shrink_memory -= obj->base.size;
+
+ spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
+ }
+}
+
+static void __i915_gem_object_make_shrinkable(struct drm_i915_gem_object *obj,
+ struct list_head *head)
+{
+ GEM_BUG_ON(!i915_gem_object_has_pages(obj));
+ GEM_BUG_ON(!list_empty(&obj->mm.link));
+
+ if (i915_gem_object_is_shrinkable(obj)) {
+ struct drm_i915_private *i915 = obj_to_i915(obj);
+ unsigned long flags;
+
+ spin_lock_irqsave(&i915->mm.obj_lock, flags);
+ GEM_BUG_ON(!kref_read(&obj->base.refcount));
+
+ list_add_tail(&obj->mm.link, head);
+ i915->mm.shrink_count++;
+ i915->mm.shrink_memory += obj->base.size;
+
+ spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
+ }
+}
+
+void i915_gem_object_make_shrinkable(struct drm_i915_gem_object *obj)
+{
+ __i915_gem_object_make_shrinkable(obj,
+ &obj_to_i915(obj)->mm.shrink_list);
+}
+
+void i915_gem_object_make_purgeable(struct drm_i915_gem_object *obj)
+{
+ __i915_gem_object_make_shrinkable(obj,
+ &obj_to_i915(obj)->mm.purge_list);
+}
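
The intended life cycle of these helpers, as a sketch (the trigger
points are illustrative, not call sites from this hunk):

	/* Pages materialised and unpinned: expose them to the shrinker. */
	i915_gem_object_make_shrinkable(obj);

	/* Userspace marked the object discardable (e.g. MADV_DONTNEED). */
	i915_gem_object_make_purgeable(obj);

	/* Pinning the pages for HW use: hide the object again. */
	i915_gem_object_make_unshrinkable(obj);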
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.h b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.h
new file mode 100644
index 000000000000..b397d7785789
--- /dev/null
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __I915_GEM_SHRINKER_H__
+#define __I915_GEM_SHRINKER_H__
+
+#include <linux/bits.h>
+
+struct drm_i915_private;
+struct mutex;
+
+/* i915_gem_shrinker.c */
+unsigned long i915_gem_shrink(struct drm_i915_private *i915,
+ unsigned long target,
+ unsigned long *nr_scanned,
+ unsigned flags);
+#define I915_SHRINK_UNBOUND BIT(0)
+#define I915_SHRINK_BOUND BIT(1)
+#define I915_SHRINK_ACTIVE BIT(2)
+#define I915_SHRINK_VMAPS BIT(3)
+#define I915_SHRINK_WRITEBACK BIT(4)
+
+unsigned long i915_gem_shrink_all(struct drm_i915_private *i915);
+void i915_gem_driver_register__shrinker(struct drm_i915_private *i915);
+void i915_gem_driver_unregister__shrinker(struct drm_i915_private *i915);
+void i915_gem_shrinker_taints_mutex(struct drm_i915_private *i915,
+ struct mutex *mutex);
+
+#endif /* __I915_GEM_SHRINKER_H__ */
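
For orientation: I915_SHRINK_BOUND and I915_SHRINK_UNBOUND select which
object lists are scanned, I915_SHRINK_ACTIVE additionally allows
unbinding objects still in use by the GPU (now forwarded as
I915_GEM_OBJECT_UNBIND_ACTIVE), I915_SHRINK_VMAPS reclaims vmap space,
and I915_SHRINK_WRITEBACK opts into writing released pages straight to
swap. A minimal caller sketch, with target and flags purely
illustrative:

	unsigned long nr_scanned = 0;
	unsigned long freed;

	/* Reclaim up to 128 pages without disturbing active objects. */
	freed = i915_gem_shrink(i915, 128, &nr_scanned,
				I915_SHRINK_BOUND | I915_SHRINK_UNBOUND);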
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
index de1fab2058ec..aa533b4ab5f5 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
@@ -11,6 +11,7 @@
#include <drm/i915_drm.h>
#include "i915_drv.h"
+#include "i915_gem_stolen.h"
/*
* The BIOS typically reserves some of the system's memory for the exclusive
@@ -362,12 +363,16 @@ int i915_gem_init_stolen(struct drm_i915_private *dev_priv)
mutex_init(&dev_priv->mm.stolen_lock);
if (intel_vgpu_active(dev_priv)) {
- DRM_INFO("iGVT-g active, disabling use of stolen memory\n");
+ dev_notice(dev_priv->drm.dev,
+ "%s, disabling use of stolen memory\n",
+ "iGVT-g active");
return 0;
}
if (intel_vtd_active() && INTEL_GEN(dev_priv) < 8) {
- DRM_INFO("DMAR active, disabling use of stolen memory\n");
+ dev_notice(dev_priv->drm.dev,
+ "%s, disabling use of stolen memory\n",
+ "DMAR active");
return 0;
}
@@ -529,8 +534,6 @@ i915_gem_object_release_stolen(struct drm_i915_gem_object *obj)
GEM_BUG_ON(!stolen);
- __i915_gem_object_unpin_pages(obj);
-
i915_gem_stolen_remove_node(dev_priv, stolen);
kfree(stolen);
}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_stolen.h b/drivers/gpu/drm/i915/gem/i915_gem_stolen.h
new file mode 100644
index 000000000000..2289644d8604
--- /dev/null
+++ b/drivers/gpu/drm/i915/gem/i915_gem_stolen.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __I915_GEM_STOLEN_H__
+#define __I915_GEM_STOLEN_H__
+
+#include <linux/types.h>
+
+struct drm_i915_private;
+struct drm_mm_node;
+struct drm_i915_gem_object;
+
+int i915_gem_stolen_insert_node(struct drm_i915_private *dev_priv,
+ struct drm_mm_node *node, u64 size,
+ unsigned alignment);
+int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *dev_priv,
+ struct drm_mm_node *node, u64 size,
+ unsigned alignment, u64 start,
+ u64 end);
+void i915_gem_stolen_remove_node(struct drm_i915_private *dev_priv,
+ struct drm_mm_node *node);
+int i915_gem_init_stolen(struct drm_i915_private *dev_priv);
+void i915_gem_cleanup_stolen(struct drm_i915_private *dev_priv);
+struct drm_i915_gem_object *
+i915_gem_object_create_stolen(struct drm_i915_private *dev_priv,
+ resource_size_t size);
+struct drm_i915_gem_object *
+i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv,
+ resource_size_t stolen_offset,
+ resource_size_t gtt_offset,
+ resource_size_t size);
+
+#endif /* __I915_GEM_STOLEN_H__ */
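
A hypothetical user of the node API, to show the expected pairing
(sizes and alignment are illustrative):

	struct drm_mm_node node = {};
	int err;

	err = i915_gem_stolen_insert_node(dev_priv, &node, SZ_64K, SZ_4K);
	if (err)
		return err;
	/* ... use the stolen-memory carve-out ... */
	i915_gem_stolen_remove_node(dev_priv, &node);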
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_throttle.c b/drivers/gpu/drm/i915/gem/i915_gem_throttle.c
index adb3074d9ce2..1e372420771b 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_throttle.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_throttle.c
@@ -41,7 +41,7 @@ i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
long ret;
/* ABI: return -EIO if already wedged */
- ret = i915_terminally_wedged(to_i915(dev));
+ ret = intel_gt_terminally_wedged(&to_i915(dev)->gt);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
index 528b61678334..11b231c187c5 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
@@ -12,11 +12,10 @@
#include <drm/i915_drm.h>
+#include "i915_drv.h"
#include "i915_gem_ioctls.h"
#include "i915_gem_object.h"
#include "i915_scatterlist.h"
-#include "i915_trace.h"
-#include "intel_drv.h"
struct i915_mm_struct {
struct mm_struct *mm;
@@ -150,7 +149,8 @@ userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
}
}
- ret = i915_gem_object_unbind(obj);
+ ret = i915_gem_object_unbind(obj,
+ I915_GEM_OBJECT_UNBIND_ACTIVE);
if (ret == 0)
ret = __i915_gem_object_put_pages(obj, I915_MM_SHRINKER);
i915_gem_object_put(obj);
@@ -662,6 +662,14 @@ i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj,
__i915_gem_object_release_shmem(obj, pages, true);
i915_gem_gtt_finish_pages(obj, pages);
+ /*
+ * We always mark objects as dirty when they are used by the GPU,
+ * just in case. However, if we set the vma as being read-only we know
+ * that the object will never have been written to.
+ */
+ if (i915_gem_object_is_readonly(obj))
+ obj->mm.dirty = false;
+
for_each_sgt_page(page, sgt_iter, pages) {
if (obj->mm.dirty)
set_page_dirty(page);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_wait.c b/drivers/gpu/drm/i915/gem/i915_gem_wait.c
index 26ec6579b7cd..8af55cd3e690 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_wait.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_wait.c
@@ -31,11 +31,10 @@ i915_gem_object_wait_fence(struct dma_fence *fence,
}
static long
-i915_gem_object_wait_reservation(struct reservation_object *resv,
+i915_gem_object_wait_reservation(struct dma_resv *resv,
unsigned int flags,
long timeout)
{
- unsigned int seq = __read_seqcount_begin(&resv->seq);
struct dma_fence *excl;
bool prune_fences = false;
@@ -44,7 +43,7 @@ i915_gem_object_wait_reservation(struct reservation_object *resv,
unsigned int count, i;
int ret;
- ret = reservation_object_get_fences_rcu(resv,
+ ret = dma_resv_get_fences_rcu(resv,
&excl, &count, &shared);
if (ret)
return ret;
@@ -73,7 +72,7 @@ i915_gem_object_wait_reservation(struct reservation_object *resv,
*/
prune_fences = count && timeout >= 0;
} else {
- excl = reservation_object_get_excl_rcu(resv);
+ excl = dma_resv_get_excl_rcu(resv);
}
if (excl && timeout >= 0)
@@ -83,15 +82,12 @@ i915_gem_object_wait_reservation(struct reservation_object *resv,
/*
* Opportunistically prune the fences iff we know they have *all* been
- * signaled and that the reservation object has not been changed (i.e.
- * no new fences have been added).
+ * signaled.
*/
- if (prune_fences && !__read_seqcount_retry(&resv->seq, seq)) {
- if (reservation_object_trylock(resv)) {
- if (!__read_seqcount_retry(&resv->seq, seq))
- reservation_object_add_excl_fence(resv, NULL);
- reservation_object_unlock(resv);
- }
+ if (prune_fences && dma_resv_trylock(resv)) {
+ if (dma_resv_test_signaled_rcu(resv, true))
+ dma_resv_add_excl_fence(resv, NULL);
+ dma_resv_unlock(resv);
}
return timeout;
@@ -144,7 +140,7 @@ i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
unsigned int count, i;
int ret;
- ret = reservation_object_get_fences_rcu(obj->base.resv,
+ ret = dma_resv_get_fences_rcu(obj->base.resv,
&excl, &count, &shared);
if (ret)
return ret;
@@ -156,7 +152,7 @@ i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
kfree(shared);
} else {
- excl = reservation_object_get_excl_rcu(obj->base.resv);
+ excl = dma_resv_get_excl_rcu(obj->base.resv);
}
if (excl) {
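
For reference, the dma_resv accessors adopted here follow the usual
consumer pattern, condensed below (a sketch, not the driver code):

	struct dma_fence *excl, **shared;
	unsigned int count, i;
	int err;

	err = dma_resv_get_fences_rcu(resv, &excl, &count, &shared);
	if (err)
		return err;

	for (i = 0; i < count; i++) {
		/* wait on or inspect shared[i] here */
		dma_fence_put(shared[i]);
	}
	kfree(shared);
	dma_fence_put(excl); /* NULL-safe */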
diff --git a/drivers/gpu/drm/i915/gem/i915_gemfs.c b/drivers/gpu/drm/i915/gem/i915_gemfs.c
index 099f3397aada..5e6e8c91ab38 100644
--- a/drivers/gpu/drm/i915/gem/i915_gemfs.c
+++ b/drivers/gpu/drm/i915/gem/i915_gemfs.c
@@ -20,31 +20,18 @@ int i915_gemfs_init(struct drm_i915_private *i915)
if (!type)
return -ENODEV;
- gemfs = kern_mount(type);
- if (IS_ERR(gemfs))
- return PTR_ERR(gemfs);
-
/*
- * Enable huge-pages for objects that are at least HPAGE_PMD_SIZE, most
- * likely 2M. Note that within_size may overallocate huge-pages, if say
- * we allocate an object of size 2M + 4K, we may get 2M + 2M, but under
- * memory pressure shmem should split any huge-pages which can be
- * shrunk.
+ * By creating our own shmemfs mountpoint, we can pass in
+ * mount flags that better match our usecase.
+ *
+ * One example, although it is probably better with a per-file
+ * control, is selecting huge page allocations ("huge=within_size").
+ * Currently unused due to bandwidth issues (slow reads) on Broadwell+.
*/
- if (has_transparent_hugepage()) {
- struct super_block *sb = gemfs->mnt_sb;
- /* FIXME: Disabled until we get W/A for read BW issue. */
- char options[] = "huge=never";
- int flags = 0;
- int err;
-
- err = sb->s_op->remount_fs(sb, &flags, options);
- if (err) {
- kern_unmount(gemfs);
- return err;
- }
- }
+ gemfs = kern_mount(type);
+ if (IS_ERR(gemfs))
+ return PTR_ERR(gemfs);
i915->mm.gemfs = gemfs;
diff --git a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
index b74729b6f353..8de83c6d81f5 100644
--- a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
+++ b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
@@ -10,6 +10,8 @@
#include "gem/i915_gem_pm.h"
+#include "gt/intel_gt.h"
+
#include "igt_gem_utils.h"
#include "mock_context.h"
@@ -877,126 +879,22 @@ out_object_put:
return err;
}
-static struct i915_vma *
-gpu_write_dw(struct i915_vma *vma, u64 offset, u32 val)
-{
- struct drm_i915_private *i915 = vma->vm->i915;
- const int gen = INTEL_GEN(i915);
- unsigned int count = vma->size >> PAGE_SHIFT;
- struct drm_i915_gem_object *obj;
- struct i915_vma *batch;
- unsigned int size;
- u32 *cmd;
- int n;
- int err;
-
- size = (1 + 4 * count) * sizeof(u32);
- size = round_up(size, PAGE_SIZE);
- obj = i915_gem_object_create_internal(i915, size);
- if (IS_ERR(obj))
- return ERR_CAST(obj);
-
- cmd = i915_gem_object_pin_map(obj, I915_MAP_WC);
- if (IS_ERR(cmd)) {
- err = PTR_ERR(cmd);
- goto err;
- }
-
- offset += vma->node.start;
-
- for (n = 0; n < count; n++) {
- if (gen >= 8) {
- *cmd++ = MI_STORE_DWORD_IMM_GEN4;
- *cmd++ = lower_32_bits(offset);
- *cmd++ = upper_32_bits(offset);
- *cmd++ = val;
- } else if (gen >= 4) {
- *cmd++ = MI_STORE_DWORD_IMM_GEN4 |
- (gen < 6 ? MI_USE_GGTT : 0);
- *cmd++ = 0;
- *cmd++ = offset;
- *cmd++ = val;
- } else {
- *cmd++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
- *cmd++ = offset;
- *cmd++ = val;
- }
-
- offset += PAGE_SIZE;
- }
-
- *cmd = MI_BATCH_BUFFER_END;
- i915_gem_chipset_flush(i915);
-
- i915_gem_object_unpin_map(obj);
-
- batch = i915_vma_instance(obj, vma->vm, NULL);
- if (IS_ERR(batch)) {
- err = PTR_ERR(batch);
- goto err;
- }
-
- err = i915_vma_pin(batch, 0, 0, PIN_USER);
- if (err)
- goto err;
-
- return batch;
-
-err:
- i915_gem_object_put(obj);
-
- return ERR_PTR(err);
-}
-
static int gpu_write(struct i915_vma *vma,
struct i915_gem_context *ctx,
struct intel_engine_cs *engine,
- u32 dword,
- u32 value)
+ u32 dw,
+ u32 val)
{
- struct i915_request *rq;
- struct i915_vma *batch;
int err;
- GEM_BUG_ON(!intel_engine_can_store_dword(engine));
-
- batch = gpu_write_dw(vma, dword * sizeof(u32), value);
- if (IS_ERR(batch))
- return PTR_ERR(batch);
-
- rq = igt_request_alloc(ctx, engine);
- if (IS_ERR(rq)) {
- err = PTR_ERR(rq);
- goto err_batch;
- }
-
- i915_vma_lock(batch);
- err = i915_vma_move_to_active(batch, rq, 0);
- i915_vma_unlock(batch);
+ i915_gem_object_lock(vma->obj);
+ err = i915_gem_object_set_to_gtt_domain(vma->obj, true);
+ i915_gem_object_unlock(vma->obj);
if (err)
- goto err_request;
-
- i915_vma_lock(vma);
- err = i915_gem_object_set_to_gtt_domain(vma->obj, false);
- if (err == 0)
- err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
- i915_vma_unlock(vma);
- if (err)
- goto err_request;
-
- err = engine->emit_bb_start(rq,
- batch->node.start, batch->node.size,
- 0);
-err_request:
- if (err)
- i915_request_skip(rq, err);
- i915_request_add(rq);
-err_batch:
- i915_vma_unpin(batch);
- i915_vma_close(batch);
- i915_vma_put(batch);
+ return err;
- return err;
+ return igt_gpu_fill_dw(vma, ctx, engine, dw * sizeof(u32),
+ vma->size >> PAGE_SHIFT, val);
}
static int cpu_check(struct drm_i915_gem_object *obj, u32 dword, u32 val)
@@ -1037,8 +935,7 @@ static int __igt_write_huge(struct i915_gem_context *ctx,
u64 size, u64 offset,
u32 dword, u32 val)
{
- struct drm_i915_private *i915 = to_i915(obj->base.dev);
- struct i915_address_space *vm = ctx->vm ?: &i915->ggtt.vm;
+ struct i915_address_space *vm = ctx->vm ?: &engine->gt->ggtt->vm;
unsigned int flags = PIN_USER | PIN_OFFSET_FIXED;
struct i915_vma *vma;
int err;
@@ -1421,6 +1318,9 @@ static int igt_ppgtt_pin_update(void *arg)
struct drm_i915_gem_object *obj;
struct i915_vma *vma;
unsigned int flags = PIN_USER | PIN_OFFSET_FIXED;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ unsigned int n;
int first, last;
int err;
@@ -1518,11 +1418,20 @@ static int igt_ppgtt_pin_update(void *arg)
* land in the now stale 2M page.
*/
- err = gpu_write(vma, ctx, dev_priv->engine[RCS0], 0, 0xdeadbeaf);
- if (err)
- goto out_unpin;
+ n = 0;
+ for_each_engine(engine, dev_priv, id) {
+ if (!intel_engine_can_store_dword(engine))
+ continue;
- err = cpu_check(obj, 0, 0xdeadbeaf);
+ err = gpu_write(vma, ctx, engine, n++, 0xdeadbeaf);
+ if (err)
+ goto out_unpin;
+ }
+ while (n--) {
+ err = cpu_check(obj, n, 0xdeadbeaf);
+ if (err)
+ goto out_unpin;
+ }
out_unpin:
i915_vma_unpin(vma);
@@ -1598,8 +1507,11 @@ static int igt_shrink_thp(void *arg)
struct drm_i915_private *i915 = ctx->i915;
struct i915_address_space *vm = ctx->vm ?: &i915->ggtt.vm;
struct drm_i915_gem_object *obj;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
struct i915_vma *vma;
unsigned int flags = PIN_USER;
+ unsigned int n;
int err;
/*
@@ -1635,9 +1547,15 @@ static int igt_shrink_thp(void *arg)
if (err)
goto out_unpin;
- err = gpu_write(vma, ctx, i915->engine[RCS0], 0, 0xdeadbeaf);
- if (err)
- goto out_unpin;
+ n = 0;
+ for_each_engine(engine, i915, id) {
+ if (!intel_engine_can_store_dword(engine))
+ continue;
+
+ err = gpu_write(vma, ctx, engine, n++, 0xdeadbeaf);
+ if (err)
+ goto out_unpin;
+ }
i915_vma_unpin(vma);
@@ -1662,7 +1580,12 @@ static int igt_shrink_thp(void *arg)
if (err)
goto out_close;
- err = cpu_check(obj, 0, 0xdeadbeaf);
+ while (n--) {
+ err = cpu_check(obj, n, 0xdeadbeaf);
+ if (err)
+ goto out_unpin;
+ }
+
out_unpin:
i915_vma_unpin(vma);
@@ -1726,7 +1649,7 @@ out_unlock:
return err;
}
-int i915_gem_huge_page_live_selftests(struct drm_i915_private *dev_priv)
+int i915_gem_huge_page_live_selftests(struct drm_i915_private *i915)
{
static const struct i915_subtest tests[] = {
SUBTEST(igt_shrink_thp),
@@ -1741,22 +1664,22 @@ int i915_gem_huge_page_live_selftests(struct drm_i915_private *dev_priv)
intel_wakeref_t wakeref;
int err;
- if (!HAS_PPGTT(dev_priv)) {
+ if (!HAS_PPGTT(i915)) {
pr_info("PPGTT not supported, skipping live-selftests\n");
return 0;
}
- if (i915_terminally_wedged(dev_priv))
+ if (intel_gt_is_wedged(&i915->gt))
return 0;
- file = mock_file(dev_priv);
+ file = mock_file(i915);
if (IS_ERR(file))
return PTR_ERR(file);
- mutex_lock(&dev_priv->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
+ mutex_lock(&i915->drm.struct_mutex);
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
- ctx = live_context(dev_priv, file);
+ ctx = live_context(i915, file);
if (IS_ERR(ctx)) {
err = PTR_ERR(ctx);
goto out_unlock;
@@ -1768,10 +1691,10 @@ int i915_gem_huge_page_live_selftests(struct drm_i915_private *dev_priv)
err = i915_subtests(tests, ctx);
out_unlock:
- intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
- mutex_unlock(&dev_priv->drm.struct_mutex);
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
+ mutex_unlock(&i915->drm.struct_mutex);
- mock_file_free(dev_priv, file);
+ mock_file_free(i915, file);
return err;
}
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
index f3a5eb807c1c..d8804a847945 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
@@ -5,14 +5,17 @@
#include "i915_selftest.h"
+#include "gt/intel_gt.h"
+
#include "selftests/igt_flush_test.h"
#include "selftests/mock_drm.h"
+#include "huge_gem_object.h"
#include "mock_context.h"
static int igt_client_fill(void *arg)
{
- struct intel_context *ce = arg;
- struct drm_i915_private *i915 = ce->gem_context->i915;
+ struct drm_i915_private *i915 = arg;
+ struct intel_context *ce = i915->engine[BCS0]->kernel_context;
struct drm_i915_gem_object *obj;
struct rnd_state prng;
IGT_TIMEOUT(end);
@@ -22,15 +25,19 @@ static int igt_client_fill(void *arg)
prandom_seed_state(&prng, i915_selftest.random_seed);
do {
- u32 sz = prandom_u32_state(&prng) % SZ_32M;
+ const u32 max_block_size = S16_MAX * PAGE_SIZE;
+ u32 sz = min_t(u64, ce->vm->total >> 4, prandom_u32_state(&prng));
+ u32 phys_sz = sz % (max_block_size + 1);
u32 val = prandom_u32_state(&prng);
u32 i;
sz = round_up(sz, PAGE_SIZE);
+ phys_sz = round_up(phys_sz, PAGE_SIZE);
- pr_debug("%s with sz=%x, val=%x\n", __func__, sz, val);
+ pr_debug("%s with phys_sz= %x, sz=%x, val=%x\n", __func__,
+ phys_sz, sz, val);
- obj = i915_gem_object_create_internal(i915, sz);
+ obj = huge_gem_object(i915, phys_sz, sz);
if (IS_ERR(obj)) {
err = PTR_ERR(obj);
goto err_flush;
@@ -52,7 +59,8 @@ static int igt_client_fill(void *arg)
* values after we do the set_to_cpu_domain and pick it up as a
* test failure.
*/
- memset32(vaddr, val ^ 0xdeadbeaf, obj->base.size / sizeof(u32));
+ memset32(vaddr, val ^ 0xdeadbeaf,
+ huge_gem_object_phys_size(obj) / sizeof(u32));
if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE))
obj->cache_dirty = true;
@@ -63,24 +71,13 @@ static int igt_client_fill(void *arg)
if (err)
goto err_unpin;
- /*
- * XXX: For now do the wait without the object resv lock to
- * ensure we don't deadlock.
- */
- err = i915_gem_object_wait(obj,
- I915_WAIT_INTERRUPTIBLE |
- I915_WAIT_ALL,
- MAX_SCHEDULE_TIMEOUT);
- if (err)
- goto err_unpin;
-
i915_gem_object_lock(obj);
err = i915_gem_object_set_to_cpu_domain(obj, false);
i915_gem_object_unlock(obj);
if (err)
goto err_unpin;
- for (i = 0; i < obj->base.size / sizeof(u32); ++i) {
+ for (i = 0; i < huge_gem_object_phys_size(obj) / sizeof(u32); ++i) {
if (vaddr[i] != val) {
pr_err("vaddr[%u]=%x, expected=%x\n", i,
vaddr[i], val);
@@ -100,11 +97,6 @@ err_unpin:
err_put:
i915_gem_object_put(obj);
err_flush:
- mutex_lock(&i915->drm.struct_mutex);
- if (igt_flush_test(i915, I915_WAIT_LOCKED))
- err = -EIO;
- mutex_unlock(&i915->drm.struct_mutex);
-
if (err == -ENOMEM)
err = 0;
@@ -117,11 +109,11 @@ int i915_gem_client_blt_live_selftests(struct drm_i915_private *i915)
SUBTEST(igt_client_fill),
};
- if (i915_terminally_wedged(i915))
+ if (intel_gt_is_wedged(&i915->gt))
return 0;
if (!HAS_ENGINE(i915, BCS0))
return 0;
- return i915_subtests(tests, i915->engine[BCS0]->kernel_context);
+ return i915_live_subtests(tests, i915);
}
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c
index 8f22d3f18422..0ff7a89aadca 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c
@@ -6,6 +6,8 @@
#include <linux/prime_numbers.h>
+#include "gt/intel_gt.h"
+
#include "i915_selftest.h"
#include "selftests/i915_random.h"
@@ -226,7 +228,9 @@ static int gpu_set(struct drm_i915_gem_object *obj,
intel_ring_advance(rq, cs);
i915_vma_lock(vma);
- err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
+ err = i915_request_await_object(rq, vma->obj, true);
+ if (err == 0)
+ err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
i915_vma_unlock(vma);
i915_vma_unpin(vma);
@@ -242,12 +246,15 @@ static bool always_valid(struct drm_i915_private *i915)
static bool needs_fence_registers(struct drm_i915_private *i915)
{
- return !i915_terminally_wedged(i915);
+ return !intel_gt_is_wedged(&i915->gt);
}
static bool needs_mi_store_dword(struct drm_i915_private *i915)
{
- if (i915_terminally_wedged(i915))
+ if (intel_gt_is_wedged(&i915->gt))
+ return false;
+
+ if (!HAS_ENGINE(i915, RCS0))
return false;
return intel_engine_can_store_dword(i915->engine[RCS0]);
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
index eaa2b16574c7..3e6f4a65d356 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
@@ -7,6 +7,7 @@
#include <linux/prime_numbers.h>
#include "gem/i915_gem_pm.h"
+#include "gt/intel_gt.h"
#include "gt/intel_reset.h"
#include "i915_selftest.h"
@@ -31,7 +32,6 @@ static int live_nop_switch(void *arg)
struct intel_engine_cs *engine;
struct i915_gem_context **ctx;
enum intel_engine_id id;
- intel_wakeref_t wakeref;
struct igt_live_test t;
struct drm_file *file;
unsigned long n;
@@ -53,7 +53,6 @@ static int live_nop_switch(void *arg)
return PTR_ERR(file);
mutex_lock(&i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
ctx = kcalloc(nctx, sizeof(*ctx), GFP_KERNEL);
if (!ctx) {
@@ -85,7 +84,7 @@ static int live_nop_switch(void *arg)
}
if (i915_request_wait(rq, 0, HZ / 5) < 0) {
pr_err("Failed to populated %d contexts\n", nctx);
- i915_gem_set_wedged(i915);
+ intel_gt_set_wedged(&i915->gt);
err = -EIO;
goto out_unlock;
}
@@ -129,7 +128,7 @@ static int live_nop_switch(void *arg)
if (i915_request_wait(rq, 0, HZ / 5) < 0) {
pr_err("Switching between %ld contexts timed out\n",
prime);
- i915_gem_set_wedged(i915);
+ intel_gt_set_wedged(&i915->gt);
break;
}
@@ -152,76 +151,11 @@ static int live_nop_switch(void *arg)
}
out_unlock:
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
mutex_unlock(&i915->drm.struct_mutex);
mock_file_free(i915, file);
return err;
}
-static struct i915_vma *
-gpu_fill_dw(struct i915_vma *vma, u64 offset, unsigned long count, u32 value)
-{
- struct drm_i915_gem_object *obj;
- const int gen = INTEL_GEN(vma->vm->i915);
- unsigned long n, size;
- u32 *cmd;
- int err;
-
- size = (4 * count + 1) * sizeof(u32);
- size = round_up(size, PAGE_SIZE);
- obj = i915_gem_object_create_internal(vma->vm->i915, size);
- if (IS_ERR(obj))
- return ERR_CAST(obj);
-
- cmd = i915_gem_object_pin_map(obj, I915_MAP_WB);
- if (IS_ERR(cmd)) {
- err = PTR_ERR(cmd);
- goto err;
- }
-
- GEM_BUG_ON(offset + (count - 1) * PAGE_SIZE > vma->node.size);
- offset += vma->node.start;
-
- for (n = 0; n < count; n++) {
- if (gen >= 8) {
- *cmd++ = MI_STORE_DWORD_IMM_GEN4;
- *cmd++ = lower_32_bits(offset);
- *cmd++ = upper_32_bits(offset);
- *cmd++ = value;
- } else if (gen >= 4) {
- *cmd++ = MI_STORE_DWORD_IMM_GEN4 |
- (gen < 6 ? MI_USE_GGTT : 0);
- *cmd++ = 0;
- *cmd++ = offset;
- *cmd++ = value;
- } else {
- *cmd++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
- *cmd++ = offset;
- *cmd++ = value;
- }
- offset += PAGE_SIZE;
- }
- *cmd = MI_BATCH_BUFFER_END;
- i915_gem_object_flush_map(obj);
- i915_gem_object_unpin_map(obj);
-
- vma = i915_vma_instance(obj, vma->vm, NULL);
- if (IS_ERR(vma)) {
- err = PTR_ERR(vma);
- goto err;
- }
-
- err = i915_vma_pin(vma, 0, 0, PIN_USER);
- if (err)
- goto err;
-
- return vma;
-
-err:
- i915_gem_object_put(obj);
- return ERR_PTR(err);
-}
-
static unsigned long real_page_count(struct drm_i915_gem_object *obj)
{
return huge_gem_object_phys_size(obj) >> PAGE_SHIFT;
@@ -237,12 +171,8 @@ static int gpu_fill(struct drm_i915_gem_object *obj,
struct intel_engine_cs *engine,
unsigned int dw)
{
- struct drm_i915_private *i915 = to_i915(obj->base.dev);
- struct i915_address_space *vm = ctx->vm ?: &i915->ggtt.vm;
- struct i915_request *rq;
+ struct i915_address_space *vm = ctx->vm ?: &engine->gt->ggtt->vm;
struct i915_vma *vma;
- struct i915_vma *batch;
- unsigned int flags;
int err;
GEM_BUG_ON(obj->base.size > vm->total);
@@ -253,7 +183,7 @@ static int gpu_fill(struct drm_i915_gem_object *obj,
return PTR_ERR(vma);
i915_gem_object_lock(obj);
- err = i915_gem_object_set_to_gtt_domain(obj, false);
+ err = i915_gem_object_set_to_gtt_domain(obj, true);
i915_gem_object_unlock(obj);
if (err)
return err;
@@ -262,70 +192,23 @@ static int gpu_fill(struct drm_i915_gem_object *obj,
if (err)
return err;
- /* Within the GTT the huge objects maps every page onto
+ /*
+ * Within the GTT the huge object maps every page onto
* its 1024 real pages (using phys_pfn = dma_pfn % 1024).
* We set the nth dword within the page using the nth
* mapping via the GTT - this should exercise the GTT mapping
* whilst checking that each context provides a unique view
* into the object.
*/
- batch = gpu_fill_dw(vma,
- (dw * real_page_count(obj)) << PAGE_SHIFT |
- (dw * sizeof(u32)),
- real_page_count(obj),
- dw);
- if (IS_ERR(batch)) {
- err = PTR_ERR(batch);
- goto err_vma;
- }
-
- rq = igt_request_alloc(ctx, engine);
- if (IS_ERR(rq)) {
- err = PTR_ERR(rq);
- goto err_batch;
- }
-
- flags = 0;
- if (INTEL_GEN(vm->i915) <= 5)
- flags |= I915_DISPATCH_SECURE;
-
- err = engine->emit_bb_start(rq,
- batch->node.start, batch->node.size,
- flags);
- if (err)
- goto err_request;
-
- i915_vma_lock(batch);
- err = i915_vma_move_to_active(batch, rq, 0);
- i915_vma_unlock(batch);
- if (err)
- goto skip_request;
-
- i915_vma_lock(vma);
- err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
- i915_vma_unlock(vma);
- if (err)
- goto skip_request;
-
- i915_request_add(rq);
-
- i915_vma_unpin(batch);
- i915_vma_close(batch);
- i915_vma_put(batch);
-
+ err = igt_gpu_fill_dw(vma,
+ ctx,
+ engine,
+ (dw * real_page_count(obj)) << PAGE_SHIFT |
+ (dw * sizeof(u32)),
+ real_page_count(obj),
+ dw);
i915_vma_unpin(vma);
- return 0;
-
-skip_request:
- i915_request_skip(rq, err);
-err_request:
- i915_request_add(rq);
-err_batch:
- i915_vma_unpin(batch);
- i915_vma_put(batch);
-err_vma:
- i915_vma_unpin(vma);
return err;
}
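
To make the offset packing concrete (a worked example, not from the
patch): with real_page_count(obj) == 1024 and dw == 2, the stores begin
at (2 * 1024) << PAGE_SHIFT | (2 * 4) = 0x00800008, i.e. dword slot 2
of virtual page 2048; since the huge object wraps phys_pfn =
dma_pfn % 1024, the 1024 PAGE_SIZE-sized steps touch every physical
page exactly once, each pass landing in its own dword slot.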
@@ -431,6 +314,9 @@ create_test_object(struct i915_gem_context *ctx,
u64 size;
int err;
+ /* Keep in GEM's good graces */
+ i915_retire_requests(ctx->i915);
+
size = min(vm->total / 2, 1024ull * DW_PER_PAGE * PAGE_SIZE);
size = round_down(size, DW_PER_PAGE * PAGE_SIZE);
@@ -507,7 +393,6 @@ static int igt_ctx_exec(void *arg)
dw = 0;
while (!time_after(jiffies, end_time)) {
struct i915_gem_context *ctx;
- intel_wakeref_t wakeref;
ctx = live_context(i915, file);
if (IS_ERR(ctx)) {
@@ -523,8 +408,7 @@ static int igt_ctx_exec(void *arg)
}
}
- with_intel_runtime_pm(&i915->runtime_pm, wakeref)
- err = gpu_fill(obj, ctx, engine, dw);
+ err = gpu_fill(obj, ctx, engine, dw);
if (err) {
pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) in ctx %u [full-ppgtt? %s], err=%d\n",
ndwords, dw, max_dwords(obj),
@@ -565,6 +449,8 @@ out_unlock:
mock_file_free(i915, file);
if (err)
return err;
+
+ i915_gem_drain_freed_objects(i915);
}
return 0;
@@ -623,7 +509,6 @@ static int igt_shared_ctx_exec(void *arg)
ncontexts = 0;
while (!time_after(jiffies, end_time)) {
struct i915_gem_context *ctx;
- intel_wakeref_t wakeref;
ctx = kernel_context(i915);
if (IS_ERR(ctx)) {
@@ -642,9 +527,7 @@ static int igt_shared_ctx_exec(void *arg)
}
}
- err = 0;
- with_intel_runtime_pm(&i915->runtime_pm, wakeref)
- err = gpu_fill(obj, ctx, engine, dw);
+ err = gpu_fill(obj, ctx, engine, dw);
if (err) {
pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) in ctx %u [full-ppgtt? %s], err=%d\n",
ndwords, dw, max_dwords(obj),
@@ -678,6 +561,10 @@ static int igt_shared_ctx_exec(void *arg)
dw += rem;
}
+
+ mutex_unlock(&i915->drm.struct_mutex);
+ i915_gem_drain_freed_objects(i915);
+ mutex_lock(&i915->drm.struct_mutex);
}
out_test:
if (igt_live_test_end(&t))
@@ -746,7 +633,7 @@ emit_rpcs_query(struct drm_i915_gem_object *obj,
GEM_BUG_ON(!intel_engine_can_store_dword(ce->engine));
- vma = i915_vma_instance(obj, ce->gem_context->vm, NULL);
+ vma = i915_vma_instance(obj, ce->vm, NULL);
if (IS_ERR(vma))
return PTR_ERR(vma);
@@ -779,13 +666,17 @@ emit_rpcs_query(struct drm_i915_gem_object *obj,
goto err_request;
i915_vma_lock(batch);
- err = i915_vma_move_to_active(batch, rq, 0);
+ err = i915_request_await_object(rq, batch->obj, false);
+ if (err == 0)
+ err = i915_vma_move_to_active(batch, rq, 0);
i915_vma_unlock(batch);
if (err)
goto skip_request;
i915_vma_lock(vma);
- err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
+ err = i915_request_await_object(rq, vma->obj, true);
+ if (err == 0)
+ err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
i915_vma_unlock(vma);
if (err)
goto skip_request;
@@ -820,8 +711,7 @@ err_vma:
#define TEST_RESET BIT(2)
static int
-__sseu_prepare(struct drm_i915_private *i915,
- const char *name,
+__sseu_prepare(const char *name,
unsigned int flags,
struct intel_context *ce,
struct igt_spinner **spin)
@@ -837,14 +727,11 @@ __sseu_prepare(struct drm_i915_private *i915,
if (!*spin)
return -ENOMEM;
- ret = igt_spinner_init(*spin, i915);
+ ret = igt_spinner_init(*spin, ce->engine->gt);
if (ret)
goto err_free;
- rq = igt_spinner_create_request(*spin,
- ce->gem_context,
- ce->engine,
- MI_NOOP);
+ rq = igt_spinner_create_request(*spin, ce, MI_NOOP);
if (IS_ERR(rq)) {
ret = PTR_ERR(rq);
goto err_fini;
@@ -870,8 +757,7 @@ err_free:
}
static int
-__read_slice_count(struct drm_i915_private *i915,
- struct intel_context *ce,
+__read_slice_count(struct intel_context *ce,
struct drm_i915_gem_object *obj,
struct igt_spinner *spin,
u32 *rpcs)
@@ -900,7 +786,7 @@ __read_slice_count(struct drm_i915_private *i915,
return ret;
}
- if (INTEL_GEN(i915) >= 11) {
+ if (INTEL_GEN(ce->engine->i915) >= 11) {
s_mask = GEN11_RPCS_S_CNT_MASK;
s_shift = GEN11_RPCS_S_CNT_SHIFT;
} else {
@@ -943,8 +829,7 @@ __check_rpcs(const char *name, u32 rpcs, int slices, unsigned int expected,
}
static int
-__sseu_finish(struct drm_i915_private *i915,
- const char *name,
+__sseu_finish(const char *name,
unsigned int flags,
struct intel_context *ce,
struct drm_i915_gem_object *obj,
@@ -956,19 +841,18 @@ __sseu_finish(struct drm_i915_private *i915,
int ret = 0;
if (flags & TEST_RESET) {
- ret = i915_reset_engine(ce->engine, "sseu");
+ ret = intel_engine_reset(ce->engine, "sseu");
if (ret)
goto out;
}
- ret = __read_slice_count(i915, ce, obj,
+ ret = __read_slice_count(ce, obj,
flags & TEST_RESET ? NULL : spin, &rpcs);
ret = __check_rpcs(name, rpcs, ret, expected, "Context", "!");
if (ret)
goto out;
- ret = __read_slice_count(i915, ce->engine->kernel_context, obj,
- NULL, &rpcs);
+ ret = __read_slice_count(ce->engine->kernel_context, obj, NULL, &rpcs);
ret = __check_rpcs(name, rpcs, ret, slices, "Kernel context", "!");
out:
@@ -976,11 +860,12 @@ out:
igt_spinner_end(spin);
if ((flags & TEST_IDLE) && ret == 0) {
- ret = i915_gem_wait_for_idle(i915, 0, MAX_SCHEDULE_TIMEOUT);
+ ret = i915_gem_wait_for_idle(ce->engine->i915,
+ 0, MAX_SCHEDULE_TIMEOUT);
if (ret)
return ret;
- ret = __read_slice_count(i915, ce, obj, NULL, &rpcs);
+ ret = __read_slice_count(ce, obj, NULL, &rpcs);
ret = __check_rpcs(name, rpcs, ret, expected,
"Context", " after idle!");
}
@@ -989,8 +874,7 @@ out:
}
static int
-__sseu_test(struct drm_i915_private *i915,
- const char *name,
+__sseu_test(const char *name,
unsigned int flags,
struct intel_context *ce,
struct drm_i915_gem_object *obj,
@@ -999,7 +883,7 @@ __sseu_test(struct drm_i915_private *i915,
struct igt_spinner *spin = NULL;
int ret;
- ret = __sseu_prepare(i915, name, flags, ce, &spin);
+ ret = __sseu_prepare(name, flags, ce, &spin);
if (ret)
return ret;
@@ -1007,7 +891,7 @@ __sseu_test(struct drm_i915_private *i915,
if (ret)
goto out_spin;
- ret = __sseu_finish(i915, name, flags, ce, obj,
+ ret = __sseu_finish(name, flags, ce, obj,
hweight32(sseu.slice_mask), spin);
out_spin:
@@ -1025,35 +909,33 @@ __igt_ctx_sseu(struct drm_i915_private *i915,
unsigned int flags)
{
struct intel_engine_cs *engine = i915->engine[RCS0];
- struct intel_sseu default_sseu = engine->sseu;
struct drm_i915_gem_object *obj;
struct i915_gem_context *ctx;
struct intel_context *ce;
struct intel_sseu pg_sseu;
- intel_wakeref_t wakeref;
struct drm_file *file;
int ret;
- if (INTEL_GEN(i915) < 9)
+ if (INTEL_GEN(i915) < 9 || !engine)
return 0;
if (!RUNTIME_INFO(i915)->sseu.has_slice_pg)
return 0;
- if (hweight32(default_sseu.slice_mask) < 2)
+ if (hweight32(engine->sseu.slice_mask) < 2)
return 0;
/*
* Gen11 VME friendly power-gated configuration with half enabled
* sub-slices.
*/
- pg_sseu = default_sseu;
+ pg_sseu = engine->sseu;
pg_sseu.slice_mask = 1;
pg_sseu.subslice_mask =
- ~(~0 << (hweight32(default_sseu.subslice_mask) / 2));
+ ~(~0 << (hweight32(engine->sseu.subslice_mask) / 2));
pr_info("SSEU subtest '%s', flags=%x, def_slices=%u, pg_slices=%u\n",
- name, flags, hweight32(default_sseu.slice_mask),
+ name, flags, hweight32(engine->sseu.slice_mask),
hweight32(pg_sseu.slice_mask));
file = mock_file(i915);
@@ -1061,7 +943,7 @@ __igt_ctx_sseu(struct drm_i915_private *i915,
return PTR_ERR(file);
if (flags & TEST_RESET)
- igt_global_reset_lock(i915);
+ igt_global_reset_lock(&i915->gt);
mutex_lock(&i915->drm.struct_mutex);
@@ -1078,12 +960,10 @@ __igt_ctx_sseu(struct drm_i915_private *i915,
goto out_unlock;
}
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
-
ce = i915_gem_context_get_engine(ctx, RCS0);
if (IS_ERR(ce)) {
ret = PTR_ERR(ce);
- goto out_rpm;
+ goto out_put;
}
ret = intel_context_pin(ce);
@@ -1091,22 +971,22 @@ __igt_ctx_sseu(struct drm_i915_private *i915,
goto out_context;
/* First set the default mask. */
- ret = __sseu_test(i915, name, flags, ce, obj, default_sseu);
+ ret = __sseu_test(name, flags, ce, obj, engine->sseu);
if (ret)
goto out_fail;
/* Then set a power-gated configuration. */
- ret = __sseu_test(i915, name, flags, ce, obj, pg_sseu);
+ ret = __sseu_test(name, flags, ce, obj, pg_sseu);
if (ret)
goto out_fail;
/* Back to defaults. */
- ret = __sseu_test(i915, name, flags, ce, obj, default_sseu);
+ ret = __sseu_test(name, flags, ce, obj, engine->sseu);
if (ret)
goto out_fail;
/* One last power-gated configuration for the road. */
- ret = __sseu_test(i915, name, flags, ce, obj, pg_sseu);
+ ret = __sseu_test(name, flags, ce, obj, pg_sseu);
if (ret)
goto out_fail;
@@ -1117,15 +997,14 @@ out_fail:
intel_context_unpin(ce);
out_context:
intel_context_put(ce);
-out_rpm:
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
+out_put:
i915_gem_object_put(obj);
out_unlock:
mutex_unlock(&i915->drm.struct_mutex);
if (flags & TEST_RESET)
- igt_global_reset_unlock(i915);
+ igt_global_reset_unlock(&i915->gt);
mock_file_free(i915, file);
@@ -1194,7 +1073,7 @@ static int igt_ctx_readonly(void *arg)
goto out_unlock;
}
- vm = ctx->vm ?: &i915->mm.aliasing_ppgtt->vm;
+ vm = ctx->vm ?: &i915->ggtt.alias->vm;
if (!vm || !vm->has_read_only) {
err = 0;
goto out_unlock;
@@ -1207,8 +1086,6 @@ static int igt_ctx_readonly(void *arg)
unsigned int id;
for_each_engine(engine, i915, id) {
- intel_wakeref_t wakeref;
-
if (!intel_engine_can_store_dword(engine))
continue;
@@ -1223,9 +1100,7 @@ static int igt_ctx_readonly(void *arg)
i915_gem_object_set_readonly(obj);
}
- err = 0;
- with_intel_runtime_pm(&i915->runtime_pm, wakeref)
- err = gpu_fill(obj, ctx, engine, dw);
+ err = gpu_fill(obj, ctx, engine, dw);
if (err) {
pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) in ctx %u [full-ppgtt? %s], err=%d\n",
ndwords, dw, max_dwords(obj),
@@ -1347,7 +1222,9 @@ static int write_to_scratch(struct i915_gem_context *ctx,
goto err_request;
i915_vma_lock(vma);
- err = i915_vma_move_to_active(vma, rq, 0);
+ err = i915_request_await_object(rq, vma->obj, false);
+ if (err == 0)
+ err = i915_vma_move_to_active(vma, rq, 0);
i915_vma_unlock(vma);
if (err)
goto skip_request;
@@ -1444,7 +1321,9 @@ static int read_from_scratch(struct i915_gem_context *ctx,
goto err_request;
i915_vma_lock(vma);
- err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
+ err = i915_request_await_object(rq, vma->obj, true);
+ if (err == 0)
+ err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
i915_vma_unlock(vma);
if (err)
goto skip_request;
@@ -1488,7 +1367,6 @@ static int igt_vm_isolation(void *arg)
struct drm_i915_private *i915 = arg;
struct i915_gem_context *ctx_a, *ctx_b;
struct intel_engine_cs *engine;
- intel_wakeref_t wakeref;
struct igt_live_test t;
struct drm_file *file;
I915_RND_STATE(prng);
@@ -1535,8 +1413,6 @@ static int igt_vm_isolation(void *arg)
GEM_BUG_ON(ctx_b->vm->total != vm_total);
vm_total -= I915_GTT_PAGE_SIZE;
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
-
count = 0;
for_each_engine(engine, i915, id) {
IGT_TIMEOUT(end_time);
@@ -1551,7 +1427,7 @@ static int igt_vm_isolation(void *arg)
div64_u64_rem(i915_prandom_u64_state(&prng),
vm_total, &offset);
- offset &= -sizeof(u32);
+ offset = round_down(offset, alignof_dword);
offset += I915_GTT_PAGE_SIZE;
err = write_to_scratch(ctx_a, engine,
@@ -1560,7 +1436,7 @@ static int igt_vm_isolation(void *arg)
err = read_from_scratch(ctx_b, engine,
offset, &value);
if (err)
- goto out_rpm;
+ goto out_unlock;
if (value) {
pr_err("%s: Read %08x from scratch (offset 0x%08x_%08x), after %lu reads!\n",
@@ -1569,7 +1445,7 @@ static int igt_vm_isolation(void *arg)
lower_32_bits(offset),
this);
err = -EINVAL;
- goto out_rpm;
+ goto out_unlock;
}
this++;
@@ -1579,8 +1455,6 @@ static int igt_vm_isolation(void *arg)
pr_info("Checked %lu scratch offsets across %d engines\n",
count, RUNTIME_INFO(i915)->num_engines);
-out_rpm:
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
out_unlock:
if (igt_live_test_end(&t))
err = -EIO;
@@ -1736,7 +1610,7 @@ int i915_gem_context_mock_selftests(void)
return err;
}
-int i915_gem_context_live_selftests(struct drm_i915_private *dev_priv)
+int i915_gem_context_live_selftests(struct drm_i915_private *i915)
{
static const struct i915_subtest tests[] = {
SUBTEST(live_nop_switch),
@@ -1747,8 +1621,8 @@ int i915_gem_context_live_selftests(struct drm_i915_private *dev_priv)
SUBTEST(igt_vm_isolation),
};
- if (i915_terminally_wedged(dev_priv))
+ if (intel_gt_is_wedged(&i915->gt))
return 0;
- return i915_subtests(tests, dev_priv);
+ return i915_live_subtests(tests, i915);
}
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
index e3a64edef918..d85d1ce273ca 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
@@ -20,7 +20,7 @@ static int igt_dmabuf_export(void *arg)
if (IS_ERR(obj))
return PTR_ERR(obj);
- dmabuf = i915_gem_prime_export(&i915->drm, &obj->base, 0);
+ dmabuf = i915_gem_prime_export(&obj->base, 0);
i915_gem_object_put(obj);
if (IS_ERR(dmabuf)) {
pr_err("i915_gem_prime_export failed with err=%d\n",
@@ -44,7 +44,7 @@ static int igt_dmabuf_import_self(void *arg)
if (IS_ERR(obj))
return PTR_ERR(obj);
- dmabuf = i915_gem_prime_export(&i915->drm, &obj->base, 0);
+ dmabuf = i915_gem_prime_export(&obj->base, 0);
if (IS_ERR(dmabuf)) {
pr_err("i915_gem_prime_export failed with err=%d\n",
(int)PTR_ERR(dmabuf));
@@ -219,7 +219,7 @@ static int igt_dmabuf_export_vmap(void *arg)
if (IS_ERR(obj))
return PTR_ERR(obj);
- dmabuf = i915_gem_prime_export(&i915->drm, &obj->base, 0);
+ dmabuf = i915_gem_prime_export(&obj->base, 0);
if (IS_ERR(dmabuf)) {
pr_err("i915_gem_prime_export failed with err=%d\n",
(int)PTR_ERR(dmabuf));
@@ -266,7 +266,7 @@ static int igt_dmabuf_export_kmap(void *arg)
if (IS_ERR(obj))
return PTR_ERR(obj);
- dmabuf = i915_gem_prime_export(&i915->drm, &obj->base, 0);
+ dmabuf = i915_gem_prime_export(&obj->base, 0);
i915_gem_object_put(obj);
if (IS_ERR(dmabuf)) {
err = PTR_ERR(dmabuf);
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
index 5c81f4b4813a..1d27babff0ce 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
@@ -6,6 +6,7 @@
#include <linux/prime_numbers.h>
+#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"
#include "huge_gem_object.h"
#include "i915_selftest.h"
@@ -143,7 +144,7 @@ static int check_partial_mapping(struct drm_i915_gem_object *obj,
if (offset >= obj->base.size)
continue;
- i915_gem_flush_ggtt_writes(to_i915(obj->base.dev));
+ intel_gt_flush_ggtt_writes(&to_i915(obj->base.dev)->gt);
p = i915_gem_object_get_page(obj, offset >> PAGE_SHIFT);
cpu = kmap(p) + offset_in_page(offset);
@@ -327,7 +328,8 @@ out:
static int make_obj_busy(struct drm_i915_gem_object *obj)
{
struct drm_i915_private *i915 = to_i915(obj->base.dev);
- struct i915_request *rq;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
struct i915_vma *vma;
int err;
@@ -339,17 +341,24 @@ static int make_obj_busy(struct drm_i915_gem_object *obj)
if (err)
return err;
- rq = i915_request_create(i915->engine[RCS0]->kernel_context);
- if (IS_ERR(rq)) {
- i915_vma_unpin(vma);
- return PTR_ERR(rq);
- }
+ for_each_engine(engine, i915, id) {
+ struct i915_request *rq;
+
+ rq = i915_request_create(engine->kernel_context);
+ if (IS_ERR(rq)) {
+ i915_vma_unpin(vma);
+ return PTR_ERR(rq);
+ }
- i915_vma_lock(vma);
- err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
- i915_vma_unlock(vma);
+ i915_vma_lock(vma);
+ err = i915_request_await_object(rq, vma->obj, true);
+ if (err == 0)
+ err = i915_vma_move_to_active(vma, rq,
+ EXEC_OBJECT_WRITE);
+ i915_vma_unlock(vma);
- i915_request_add(rq);
+ i915_request_add(rq);
+ }
i915_vma_unpin(vma);
i915_gem_object_put(obj); /* leave it only alive via its active ref */
@@ -376,9 +385,9 @@ static bool assert_mmap_offset(struct drm_i915_private *i915,
static void disable_retire_worker(struct drm_i915_private *i915)
{
- i915_gem_shrinker_unregister(i915);
+ i915_gem_driver_unregister__shrinker(i915);
- intel_gt_pm_get(i915);
+ intel_gt_pm_get(&i915->gt);
cancel_delayed_work_sync(&i915->gem.retire_work);
flush_work(&i915->gem.idle_work);
@@ -386,13 +395,25 @@ static void disable_retire_worker(struct drm_i915_private *i915)
static void restore_retire_worker(struct drm_i915_private *i915)
{
- intel_gt_pm_put(i915);
+ intel_gt_pm_put(&i915->gt);
mutex_lock(&i915->drm.struct_mutex);
igt_flush_test(i915, I915_WAIT_LOCKED);
mutex_unlock(&i915->drm.struct_mutex);
- i915_gem_shrinker_register(i915);
+ i915_gem_driver_register__shrinker(i915);
+}
+
+static void mmap_offset_lock(struct drm_i915_private *i915)
+ __acquires(&i915->drm.vma_offset_manager->vm_lock)
+{
+ write_lock(&i915->drm.vma_offset_manager->vm_lock);
+}
+
+static void mmap_offset_unlock(struct drm_i915_private *i915)
+ __releases(&i915->drm.vma_offset_manager->vm_lock)
+{
+ write_unlock(&i915->drm.vma_offset_manager->vm_lock);
}
static int igt_mmap_offset_exhaustion(void *arg)
@@ -413,7 +434,9 @@ static int igt_mmap_offset_exhaustion(void *arg)
drm_mm_for_each_hole(hole, mm, hole_start, hole_end) {
resv.start = hole_start;
resv.size = hole_end - hole_start - 1; /* PAGE_SIZE units */
+ mmap_offset_lock(i915);
err = drm_mm_reserve_node(mm, &resv);
+ mmap_offset_unlock(i915);
if (err) {
pr_err("Failed to trim VMA manager, err=%d\n", err);
goto out_park;
@@ -458,7 +481,7 @@ static int igt_mmap_offset_exhaustion(void *arg)
/* Now fill with busy dead objects that we expect to reap */
for (loop = 0; loop < 3; loop++) {
- if (i915_terminally_wedged(i915))
+ if (intel_gt_is_wedged(&i915->gt))
break;
obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
@@ -474,19 +497,12 @@ static int igt_mmap_offset_exhaustion(void *arg)
pr_err("[loop %d] Failed to busy the object\n", loop);
goto err_obj;
}
-
- /* NB we rely on the _active_ reference to access obj now */
- GEM_BUG_ON(!i915_gem_object_is_active(obj));
- err = create_mmap_offset(obj);
- if (err) {
- pr_err("[loop %d] create_mmap_offset failed with err=%d\n",
- loop, err);
- goto out;
- }
}
out:
+ mmap_offset_lock(i915);
drm_mm_remove_node(&resv);
+ mmap_offset_unlock(i915);
out_park:
restore_retire_worker(i915);
return err;
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c
index e23d8c9e9298..c21d747e7d05 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c
@@ -3,16 +3,19 @@
* Copyright © 2019 Intel Corporation
*/
+#include "gt/intel_gt.h"
+
#include "i915_selftest.h"
#include "selftests/igt_flush_test.h"
#include "selftests/mock_drm.h"
+#include "huge_gem_object.h"
#include "mock_context.h"
static int igt_fill_blt(void *arg)
{
- struct intel_context *ce = arg;
- struct drm_i915_private *i915 = ce->gem_context->i915;
+ struct drm_i915_private *i915 = arg;
+ struct intel_context *ce = i915->engine[BCS0]->kernel_context;
struct drm_i915_gem_object *obj;
struct rnd_state prng;
IGT_TIMEOUT(end);
@@ -21,16 +24,26 @@ static int igt_fill_blt(void *arg)
prandom_seed_state(&prng, i915_selftest.random_seed);
+ /*
+ * XXX: needs some threads to scale all these tests; also maybe throw
+ * in submission from a higher-priority context to see if we are
+ * preempted for very large objects...
+ */
+
do {
- u32 sz = prandom_u32_state(&prng) % SZ_32M;
+ const u32 max_block_size = S16_MAX * PAGE_SIZE;
+ u32 sz = min_t(u64, ce->vm->total >> 4, prandom_u32_state(&prng));
+ u32 phys_sz = sz % (max_block_size + 1);
u32 val = prandom_u32_state(&prng);
u32 i;
sz = round_up(sz, PAGE_SIZE);
+ phys_sz = round_up(phys_sz, PAGE_SIZE);
- pr_debug("%s with sz=%x, val=%x\n", __func__, sz, val);
+ pr_debug("%s with phys_sz= %x, sz=%x, val=%x\n", __func__,
+ phys_sz, sz, val);
- obj = i915_gem_object_create_internal(i915, sz);
+ obj = huge_gem_object(i915, phys_sz, sz);
if (IS_ERR(obj)) {
err = PTR_ERR(obj);
goto err_flush;
@@ -46,7 +59,8 @@ static int igt_fill_blt(void *arg)
* Make sure the potentially async clflush does its job, if
* required.
*/
- memset32(vaddr, val ^ 0xdeadbeaf, obj->base.size / sizeof(u32));
+ memset32(vaddr, val ^ 0xdeadbeaf,
+ huge_gem_object_phys_size(obj) / sizeof(u32));
if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE))
obj->cache_dirty = true;
@@ -63,7 +77,7 @@ static int igt_fill_blt(void *arg)
if (err)
goto err_unpin;
- for (i = 0; i < obj->base.size / sizeof(u32); ++i) {
+ for (i = 0; i < huge_gem_object_phys_size(obj) / sizeof(u32); ++i) {
if (vaddr[i] != val) {
pr_err("vaddr[%u]=%x, expected=%x\n", i,
vaddr[i], val);
@@ -83,11 +97,111 @@ err_unpin:
err_put:
i915_gem_object_put(obj);
err_flush:
- mutex_lock(&i915->drm.struct_mutex);
- if (igt_flush_test(i915, I915_WAIT_LOCKED))
- err = -EIO;
- mutex_unlock(&i915->drm.struct_mutex);
+ if (err == -ENOMEM)
+ err = 0;
+
+ return err;
+}
+
+static int igt_copy_blt(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct intel_context *ce = i915->engine[BCS0]->kernel_context;
+ struct drm_i915_gem_object *src, *dst;
+ struct rnd_state prng;
+ IGT_TIMEOUT(end);
+ u32 *vaddr;
+ int err = 0;
+
+ prandom_seed_state(&prng, i915_selftest.random_seed);
+
+ do {
+ const u32 max_block_size = S16_MAX * PAGE_SIZE;
+ u32 sz = min_t(u64, ce->vm->total >> 4, prandom_u32_state(&prng));
+ u32 phys_sz = sz % (max_block_size + 1);
+ u32 val = prandom_u32_state(&prng);
+ u32 i;
+
+ sz = round_up(sz, PAGE_SIZE);
+ phys_sz = round_up(phys_sz, PAGE_SIZE);
+
+ pr_debug("%s with phys_sz= %x, sz=%x, val=%x\n", __func__,
+ phys_sz, sz, val);
+
+ src = huge_gem_object(i915, phys_sz, sz);
+ if (IS_ERR(src)) {
+ err = PTR_ERR(src);
+ goto err_flush;
+ }
+
+ vaddr = i915_gem_object_pin_map(src, I915_MAP_WB);
+ if (IS_ERR(vaddr)) {
+ err = PTR_ERR(vaddr);
+ goto err_put_src;
+ }
+
+ memset32(vaddr, val,
+ huge_gem_object_phys_size(src) / sizeof(u32));
+
+ i915_gem_object_unpin_map(src);
+
+ if (!(src->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ))
+ src->cache_dirty = true;
+ dst = huge_gem_object(i915, phys_sz, sz);
+ if (IS_ERR(dst)) {
+ err = PTR_ERR(dst);
+ goto err_put_src;
+ }
+
+ vaddr = i915_gem_object_pin_map(dst, I915_MAP_WB);
+ if (IS_ERR(vaddr)) {
+ err = PTR_ERR(vaddr);
+ goto err_put_dst;
+ }
+
+ memset32(vaddr, val ^ 0xdeadbeaf,
+ huge_gem_object_phys_size(dst) / sizeof(u32));
+
+ if (!(dst->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE))
+ dst->cache_dirty = true;
+
+ mutex_lock(&i915->drm.struct_mutex);
+ err = i915_gem_object_copy_blt(src, dst, ce);
+ mutex_unlock(&i915->drm.struct_mutex);
+ if (err)
+ goto err_unpin;
+
+ i915_gem_object_lock(dst);
+ err = i915_gem_object_set_to_cpu_domain(dst, false);
+ i915_gem_object_unlock(dst);
+ if (err)
+ goto err_unpin;
+
+ for (i = 0; i < huge_gem_object_phys_size(dst) / sizeof(u32); ++i) {
+ if (vaddr[i] != val) {
+ pr_err("vaddr[%u]=%x, expected=%x\n", i,
+ vaddr[i], val);
+ err = -EINVAL;
+ goto err_unpin;
+ }
+ }
+
+ i915_gem_object_unpin_map(dst);
+
+ i915_gem_object_put(src);
+ i915_gem_object_put(dst);
+ } while (!time_after(jiffies, end));
+
+ goto err_flush;
+
+err_unpin:
+ i915_gem_object_unpin_map(dst);
+err_put_dst:
+ i915_gem_object_put(dst);
+err_put_src:
+ i915_gem_object_put(src);
+err_flush:
if (err == -ENOMEM)
err = 0;
@@ -98,13 +212,14 @@ int i915_gem_object_blt_live_selftests(struct drm_i915_private *i915)
{
static const struct i915_subtest tests[] = {
SUBTEST(igt_fill_blt),
+ SUBTEST(igt_copy_blt),
};
- if (i915_terminally_wedged(i915))
+ if (intel_gt_is_wedged(&i915->gt))
return 0;
if (!HAS_ENGINE(i915, BCS0))
return 0;
- return i915_subtests(tests, i915->engine[BCS0]->kernel_context);
+ return i915_live_subtests(tests, i915);
}
diff --git a/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c b/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c
index b232e6d2cd92..57ece53c1075 100644
--- a/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c
+++ b/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c
@@ -9,6 +9,8 @@
#include "gem/i915_gem_context.h"
#include "gem/i915_gem_pm.h"
#include "gt/intel_context.h"
+#include "i915_vma.h"
+#include "i915_drv.h"
#include "i915_request.h"
@@ -23,7 +25,7 @@ igt_request_alloc(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
* GGTT space, so do this first before we reserve a seqno for
* ourselves.
*/
- ce = i915_gem_context_get_engine(ctx, engine->id);
+ ce = i915_gem_context_get_engine(ctx, engine->legacy_idx);
if (IS_ERR(ce))
return ERR_CAST(ce);
@@ -32,3 +34,140 @@ igt_request_alloc(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
return rq;
}
+
+struct i915_vma *
+igt_emit_store_dw(struct i915_vma *vma,
+ u64 offset,
+ unsigned long count,
+ u32 val)
+{
+ struct drm_i915_gem_object *obj;
+ const int gen = INTEL_GEN(vma->vm->i915);
+ unsigned long n, size;
+ u32 *cmd;
+ int err;
+
+ size = (4 * count + 1) * sizeof(u32);
+ size = round_up(size, PAGE_SIZE);
+ obj = i915_gem_object_create_internal(vma->vm->i915, size);
+ if (IS_ERR(obj))
+ return ERR_CAST(obj);
+
+ cmd = i915_gem_object_pin_map(obj, I915_MAP_WC);
+ if (IS_ERR(cmd)) {
+ err = PTR_ERR(cmd);
+ goto err;
+ }
+
+ GEM_BUG_ON(offset + (count - 1) * PAGE_SIZE > vma->node.size);
+ offset += vma->node.start;
+
+ for (n = 0; n < count; n++) {
+ if (gen >= 8) {
+ *cmd++ = MI_STORE_DWORD_IMM_GEN4;
+ *cmd++ = lower_32_bits(offset);
+ *cmd++ = upper_32_bits(offset);
+ *cmd++ = val;
+ } else if (gen >= 4) {
+ *cmd++ = MI_STORE_DWORD_IMM_GEN4 |
+ (gen < 6 ? MI_USE_GGTT : 0);
+ *cmd++ = 0;
+ *cmd++ = offset;
+ *cmd++ = val;
+ } else {
+ *cmd++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
+ *cmd++ = offset;
+ *cmd++ = val;
+ }
+ offset += PAGE_SIZE;
+ }
+ *cmd = MI_BATCH_BUFFER_END;
+ i915_gem_object_unpin_map(obj);
+
+ vma = i915_vma_instance(obj, vma->vm, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto err;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, PIN_USER);
+ if (err)
+ goto err;
+
+ return vma;
+
+err:
+ i915_gem_object_put(obj);
+ return ERR_PTR(err);
+}
+
+int igt_gpu_fill_dw(struct i915_vma *vma,
+ struct i915_gem_context *ctx,
+ struct intel_engine_cs *engine,
+ u64 offset,
+ unsigned long count,
+ u32 val)
+{
+ struct i915_address_space *vm = ctx->vm ?: &engine->gt->ggtt->vm;
+ struct i915_request *rq;
+ struct i915_vma *batch;
+ unsigned int flags;
+ int err;
+
+ GEM_BUG_ON(vma->size > vm->total);
+ GEM_BUG_ON(!intel_engine_can_store_dword(engine));
+ GEM_BUG_ON(!i915_vma_is_pinned(vma));
+
+ batch = igt_emit_store_dw(vma, offset, count, val);
+ if (IS_ERR(batch))
+ return PTR_ERR(batch);
+
+ rq = igt_request_alloc(ctx, engine);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto err_batch;
+ }
+
+ flags = 0;
+ if (INTEL_GEN(vm->i915) <= 5)
+ flags |= I915_DISPATCH_SECURE;
+
+ err = engine->emit_bb_start(rq,
+ batch->node.start, batch->node.size,
+ flags);
+ if (err)
+ goto err_request;
+
+ i915_vma_lock(batch);
+ err = i915_request_await_object(rq, batch->obj, false);
+ if (err == 0)
+ err = i915_vma_move_to_active(batch, rq, 0);
+ i915_vma_unlock(batch);
+ if (err)
+ goto skip_request;
+
+ i915_vma_lock(vma);
+ err = i915_request_await_object(rq, vma->obj, true);
+ if (err == 0)
+ err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
+ i915_vma_unlock(vma);
+ if (err)
+ goto skip_request;
+
+ i915_request_add(rq);
+
+ i915_vma_unpin(batch);
+ i915_vma_close(batch);
+ i915_vma_put(batch);
+
+ return 0;
+
+skip_request:
+ i915_request_skip(rq, err);
+err_request:
+ i915_request_add(rq);
+err_batch:
+ i915_vma_unpin(batch);
+ i915_vma_put(batch);
+ return err;
+}
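
As a sanity check on the batch sizing in igt_emit_store_dw(): each
per-page store emits at most four dwords (opcode, two address dwords
and the value on gen8+) plus one trailing MI_BATCH_BUFFER_END, hence
size = (4 * count + 1) * sizeof(u32), rounded up to a page. For
count == 16 that is (4 * 16 + 1) * 4 = 260 bytes, comfortably within a
single 4 KiB batch page.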
diff --git a/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.h b/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.h
index 0f17251cf75d..361a7ef866b0 100644
--- a/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.h
+++ b/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.h
@@ -7,11 +7,27 @@
#ifndef __IGT_GEM_UTILS_H__
#define __IGT_GEM_UTILS_H__
+#include <linux/types.h>
+
struct i915_request;
struct i915_gem_context;
struct intel_engine_cs;
+struct i915_vma;
struct i915_request *
igt_request_alloc(struct i915_gem_context *ctx, struct intel_engine_cs *engine);
+struct i915_vma *
+igt_emit_store_dw(struct i915_vma *vma,
+ u64 offset,
+ unsigned long count,
+ u32 val);
+
+int igt_gpu_fill_dw(struct i915_vma *vma,
+ struct i915_gem_context *ctx,
+ struct intel_engine_cs *engine,
+ u64 offset,
+ unsigned long count,
+ u32 val);
+
#endif /* __IGT_GEM_UTILS_H__ */
diff --git a/drivers/gpu/drm/i915/gt/Makefile b/drivers/gpu/drm/i915/gt/Makefile
index 1c75b5c9790c..7e73aa587967 100644
--- a/drivers/gpu/drm/i915/gt/Makefile
+++ b/drivers/gpu/drm/i915/gt/Makefile
@@ -1,2 +1,5 @@
+# For building individual subdir files on the command line
+subdir-ccflags-y += -I$(srctree)/$(src)/..
+
# Extra header tests
-include $(src)/Makefile.header-test
+header-test-pattern-$(CONFIG_DRM_I915_WERROR) := *.h
diff --git a/drivers/gpu/drm/i915/gt/Makefile.header-test b/drivers/gpu/drm/i915/gt/Makefile.header-test
deleted file mode 100644
index 61e06cbb4b32..000000000000
--- a/drivers/gpu/drm/i915/gt/Makefile.header-test
+++ /dev/null
@@ -1,16 +0,0 @@
-# SPDX-License-Identifier: MIT
-# Copyright © 2019 Intel Corporation
-
-# Test the headers are compilable as standalone units
-header_test := $(notdir $(wildcard $(src)/*.h))
-
-quiet_cmd_header_test = HDRTEST $@
- cmd_header_test = echo "\#include \"$(<F)\"" > $@
-
-header_test_%.c: %.h
- $(call cmd,header_test)
-
-extra-$(CONFIG_DRM_I915_WERROR) += \
- $(foreach h,$(header_test),$(patsubst %.h,header_test_%.o,$(h)))
-
-clean-files += $(foreach h,$(header_test),$(patsubst %.h,header_test_%.c,$(h)))
diff --git a/drivers/gpu/drm/i915/intel_renderstate_gen6.c b/drivers/gpu/drm/i915/gt/gen6_renderstate.c
index 11c8e7b3dd7c..11c8e7b3dd7c 100644
--- a/drivers/gpu/drm/i915/intel_renderstate_gen6.c
+++ b/drivers/gpu/drm/i915/gt/gen6_renderstate.c
diff --git a/drivers/gpu/drm/i915/intel_renderstate_gen7.c b/drivers/gpu/drm/i915/gt/gen7_renderstate.c
index 655180646152..655180646152 100644
--- a/drivers/gpu/drm/i915/intel_renderstate_gen7.c
+++ b/drivers/gpu/drm/i915/gt/gen7_renderstate.c
diff --git a/drivers/gpu/drm/i915/intel_renderstate_gen8.c b/drivers/gpu/drm/i915/gt/gen8_renderstate.c
index 95288a34c15d..95288a34c15d 100644
--- a/drivers/gpu/drm/i915/intel_renderstate_gen8.c
+++ b/drivers/gpu/drm/i915/gt/gen8_renderstate.c
diff --git a/drivers/gpu/drm/i915/intel_renderstate_gen9.c b/drivers/gpu/drm/i915/gt/gen9_renderstate.c
index 7d3ac02f0177..7d3ac02f0177 100644
--- a/drivers/gpu/drm/i915/intel_renderstate_gen9.c
+++ b/drivers/gpu/drm/i915/gt/gen9_renderstate.c
diff --git a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
index c092bdf5f0bf..09c68dda2098 100644
--- a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
+++ b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
@@ -27,6 +27,7 @@
#include <uapi/linux/sched/types.h>
#include "i915_drv.h"
+#include "i915_trace.h"
static void irq_enable(struct intel_engine_cs *engine)
{
@@ -34,9 +35,9 @@ static void irq_enable(struct intel_engine_cs *engine)
return;
/* Caller disables interrupts */
- spin_lock(&engine->i915->irq_lock);
+ spin_lock(&engine->gt->irq_lock);
engine->irq_enable(engine);
- spin_unlock(&engine->i915->irq_lock);
+ spin_unlock(&engine->gt->irq_lock);
}
static void irq_disable(struct intel_engine_cs *engine)
@@ -45,9 +46,9 @@ static void irq_disable(struct intel_engine_cs *engine)
return;
/* Caller disables interrupts */
- spin_lock(&engine->i915->irq_lock);
+ spin_lock(&engine->gt->irq_lock);
engine->irq_disable(engine);
- spin_unlock(&engine->i915->irq_lock);
+ spin_unlock(&engine->gt->irq_lock);
}
static void __intel_breadcrumbs_disarm_irq(struct intel_breadcrumbs *b)
@@ -66,14 +67,15 @@ static void __intel_breadcrumbs_disarm_irq(struct intel_breadcrumbs *b)
void intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine)
{
struct intel_breadcrumbs *b = &engine->breadcrumbs;
+ unsigned long flags;
if (!b->irq_armed)
return;
- spin_lock_irq(&b->irq_lock);
+ spin_lock_irqsave(&b->irq_lock, flags);
if (b->irq_armed)
__intel_breadcrumbs_disarm_irq(b);
- spin_unlock_irq(&b->irq_lock);
+ spin_unlock_irqrestore(&b->irq_lock, flags);
}
static inline bool __request_completed(const struct i915_request *rq)
@@ -112,18 +114,18 @@ __dma_fence_signal__timestamp(struct dma_fence *fence, ktime_t timestamp)
}
static void
-__dma_fence_signal__notify(struct dma_fence *fence)
+__dma_fence_signal__notify(struct dma_fence *fence,
+ const struct list_head *list)
{
struct dma_fence_cb *cur, *tmp;
lockdep_assert_held(fence->lock);
lockdep_assert_irqs_disabled();
- list_for_each_entry_safe(cur, tmp, &fence->cb_list, node) {
+ list_for_each_entry_safe(cur, tmp, list, node) {
INIT_LIST_HEAD(&cur->node);
cur->func(fence, cur);
}
- INIT_LIST_HEAD(&fence->cb_list);
}
void intel_engine_breadcrumbs_irq(struct intel_engine_cs *engine)
@@ -185,11 +187,12 @@ void intel_engine_breadcrumbs_irq(struct intel_engine_cs *engine)
list_for_each_safe(pos, next, &signal) {
struct i915_request *rq =
list_entry(pos, typeof(*rq), signal_link);
-
- __dma_fence_signal__timestamp(&rq->fence, timestamp);
+ struct list_head cb_list;
spin_lock(&rq->lock);
- __dma_fence_signal__notify(&rq->fence);
+ list_replace(&rq->fence.cb_list, &cb_list);
+ __dma_fence_signal__timestamp(&rq->fence, timestamp);
+ __dma_fence_signal__notify(&rq->fence, &cb_list);
spin_unlock(&rq->lock);
i915_request_put(rq);
@@ -211,28 +214,6 @@ static void signal_irq_work(struct irq_work *work)
intel_engine_breadcrumbs_irq(engine);
}
-void intel_engine_pin_breadcrumbs_irq(struct intel_engine_cs *engine)
-{
- struct intel_breadcrumbs *b = &engine->breadcrumbs;
-
- spin_lock_irq(&b->irq_lock);
- if (!b->irq_enabled++)
- irq_enable(engine);
- GEM_BUG_ON(!b->irq_enabled); /* no overflow! */
- spin_unlock_irq(&b->irq_lock);
-}
-
-void intel_engine_unpin_breadcrumbs_irq(struct intel_engine_cs *engine)
-{
- struct intel_breadcrumbs *b = &engine->breadcrumbs;
-
- spin_lock_irq(&b->irq_lock);
- GEM_BUG_ON(!b->irq_enabled); /* no underflow! */
- if (!--b->irq_enabled)
- irq_disable(engine);
- spin_unlock_irq(&b->irq_lock);
-}
-
static void __intel_breadcrumbs_arm_irq(struct intel_breadcrumbs *b)
{
struct intel_engine_cs *engine =
diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c
index 23120901c55f..f55691d151ae 100644
--- a/drivers/gpu/drm/i915/gt/intel_context.c
+++ b/drivers/gpu/drm/i915/gt/intel_context.c
@@ -53,12 +53,24 @@ int __intel_context_do_pin(struct intel_context *ce)
if (likely(!atomic_read(&ce->pin_count))) {
intel_wakeref_t wakeref;
+ if (unlikely(!test_bit(CONTEXT_ALLOC_BIT, &ce->flags))) {
+ err = ce->ops->alloc(ce);
+ if (unlikely(err))
+ goto err;
+
+ __set_bit(CONTEXT_ALLOC_BIT, &ce->flags);
+ }
+
err = 0;
with_intel_runtime_pm(&ce->engine->i915->runtime_pm, wakeref)
err = ce->ops->pin(ce);
if (err)
goto err;
+ GEM_TRACE("%s context:%llx pin ring:{head:%04x, tail:%04x}\n",
+ ce->engine->name, ce->timeline->fence_context,
+ ce->ring->head, ce->ring->tail);
+
i915_gem_context_get(ce->gem_context); /* for ctx->ppgtt */
smp_mb__before_atomic(); /* flush pin before it is visible */
@@ -85,6 +97,9 @@ void intel_context_unpin(struct intel_context *ce)
mutex_lock_nested(&ce->pin_mutex, SINGLE_DEPTH_NESTING);
if (likely(atomic_dec_and_test(&ce->pin_count))) {
+ GEM_TRACE("%s context:%llx retire\n",
+ ce->engine->name, ce->timeline->fence_context);
+
ce->ops->unpin(ce);
i915_gem_context_put(ce->gem_context);
@@ -95,11 +110,15 @@ void intel_context_unpin(struct intel_context *ce)
intel_context_put(ce);
}
-static int __context_pin_state(struct i915_vma *vma, unsigned long flags)
+static int __context_pin_state(struct i915_vma *vma)
{
+ u64 flags;
int err;
- err = i915_vma_pin(vma, 0, 0, flags | PIN_GLOBAL);
+ flags = i915_ggtt_pin_bias(vma) | PIN_OFFSET_BIAS;
+ flags |= PIN_HIGH | PIN_GLOBAL;
+
+ err = i915_vma_pin(vma, 0, 0, flags);
if (err)
return err;
@@ -107,7 +126,7 @@ static int __context_pin_state(struct i915_vma *vma, unsigned long flags)
* And mark it as a globally pinned object to let the shrinker know
* it cannot reclaim the object until we release it.
*/
- vma->obj->pin_global++;
+ i915_vma_make_unshrinkable(vma);
vma->obj->mm.dirty = true;
return 0;
@@ -115,83 +134,79 @@ static int __context_pin_state(struct i915_vma *vma, unsigned long flags)
static void __context_unpin_state(struct i915_vma *vma)
{
- vma->obj->pin_global--;
__i915_vma_unpin(vma);
+ i915_vma_make_shrinkable(vma);
}
-static void intel_context_retire(struct i915_active *active)
+static void __intel_context_retire(struct i915_active *active)
{
struct intel_context *ce = container_of(active, typeof(*ce), active);
+ GEM_TRACE("%s context:%llx retire\n",
+ ce->engine->name, ce->timeline->fence_context);
+
if (ce->state)
__context_unpin_state(ce->state);
+ intel_timeline_unpin(ce->timeline);
intel_ring_unpin(ce->ring);
intel_context_put(ce);
}
-void
-intel_context_init(struct intel_context *ce,
- struct i915_gem_context *ctx,
- struct intel_engine_cs *engine)
-{
- GEM_BUG_ON(!engine->cops);
-
- kref_init(&ce->ref);
-
- ce->gem_context = ctx;
- ce->engine = engine;
- ce->ops = engine->cops;
- ce->sseu = engine->sseu;
-
- INIT_LIST_HEAD(&ce->signal_link);
- INIT_LIST_HEAD(&ce->signals);
-
- mutex_init(&ce->pin_mutex);
-
- i915_active_init(ctx->i915, &ce->active, intel_context_retire);
-}
-
-int intel_context_active_acquire(struct intel_context *ce, unsigned long flags)
+static int __intel_context_active(struct i915_active *active)
{
+ struct intel_context *ce = container_of(active, typeof(*ce), active);
int err;
- if (!i915_active_acquire(&ce->active))
- return 0;
-
intel_context_get(ce);
err = intel_ring_pin(ce->ring);
if (err)
goto err_put;
+ err = intel_timeline_pin(ce->timeline);
+ if (err)
+ goto err_ring;
+
if (!ce->state)
return 0;
- err = __context_pin_state(ce->state, flags);
+ err = __context_pin_state(ce->state);
if (err)
- goto err_ring;
-
- /* Preallocate tracking nodes */
- if (!i915_gem_context_is_kernel(ce->gem_context)) {
- err = i915_active_acquire_preallocate_barrier(&ce->active,
- ce->engine);
- if (err)
- goto err_state;
- }
+ goto err_timeline;
return 0;
-err_state:
- __context_unpin_state(ce->state);
+err_timeline:
+ intel_timeline_unpin(ce->timeline);
err_ring:
intel_ring_unpin(ce->ring);
err_put:
intel_context_put(ce);
- i915_active_cancel(&ce->active);
return err;
}
+int intel_context_active_acquire(struct intel_context *ce)
+{
+ int err;
+
+ err = i915_active_acquire(&ce->active);
+ if (err)
+ return err;
+
+ /* Preallocate tracking nodes */
+ if (!i915_gem_context_is_kernel(ce->gem_context)) {
+ err = i915_active_acquire_preallocate_barrier(&ce->active,
+ ce->engine);
+ if (err) {
+ i915_active_release(&ce->active);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
void intel_context_active_release(struct intel_context *ce)
{
/* Nodes preallocated in intel_context_active_acquire() */
@@ -199,6 +214,44 @@ void intel_context_active_release(struct intel_context *ce)
i915_active_release(&ce->active);
}
+void
+intel_context_init(struct intel_context *ce,
+ struct i915_gem_context *ctx,
+ struct intel_engine_cs *engine)
+{
+ GEM_BUG_ON(!engine->cops);
+
+ kref_init(&ce->ref);
+
+ ce->gem_context = ctx;
+ ce->vm = i915_vm_get(ctx->vm ?: &engine->gt->ggtt->vm);
+ if (ctx->timeline)
+ ce->timeline = intel_timeline_get(ctx->timeline);
+
+ ce->engine = engine;
+ ce->ops = engine->cops;
+ ce->sseu = engine->sseu;
+ ce->ring = __intel_context_ring_size(SZ_16K);
+
+ INIT_LIST_HEAD(&ce->signal_link);
+ INIT_LIST_HEAD(&ce->signals);
+
+ mutex_init(&ce->pin_mutex);
+
+ i915_active_init(ctx->i915, &ce->active,
+ __intel_context_active, __intel_context_retire);
+}
+
+void intel_context_fini(struct intel_context *ce)
+{
+ if (ce->timeline)
+ intel_timeline_put(ce->timeline);
+ i915_vm_put(ce->vm);
+
+ mutex_destroy(&ce->pin_mutex);
+ i915_active_fini(&ce->active);
+}
+
static void i915_global_context_shrink(void)
{
kmem_cache_shrink(global.slab_ce);
@@ -227,13 +280,48 @@ int __init i915_global_context_init(void)
void intel_context_enter_engine(struct intel_context *ce)
{
intel_engine_pm_get(ce->engine);
+ intel_timeline_enter(ce->timeline);
}
void intel_context_exit_engine(struct intel_context *ce)
{
+ intel_timeline_exit(ce->timeline);
intel_engine_pm_put(ce->engine);
}
+int intel_context_prepare_remote_request(struct intel_context *ce,
+ struct i915_request *rq)
+{
+ struct intel_timeline *tl = ce->timeline;
+ int err;
+
+ /* Only suitable for use in remotely modifying this context */
+ GEM_BUG_ON(rq->hw_context == ce);
+
+ if (rq->timeline != tl) { /* beware timeline sharing */
+ err = mutex_lock_interruptible_nested(&tl->mutex,
+ SINGLE_DEPTH_NESTING);
+ if (err)
+ return err;
+
+ /* Queue this switch after current activity by this context. */
+ err = i915_active_request_set(&tl->last_request, rq);
+ mutex_unlock(&tl->mutex);
+ if (err)
+ return err;
+ }
+
+ /*
+ * Guarantee that the context image and the timeline remain pinned
+ * until the modifying request is retired, by setting the ce activity
+ * tracker.
+ *
+ * We need only take one pin on their account; in other words, we
+ * transfer the pinned ce object to the tracked active request.
+ */
+ GEM_BUG_ON(i915_active_is_idle(&ce->active));
+ return i915_active_ref(&ce->active, rq->timeline, rq);
+}
+
struct i915_request *intel_context_create_request(struct intel_context *ce)
{
struct i915_request *rq;
@@ -248,3 +336,7 @@ struct i915_request *intel_context_create_request(struct intel_context *ce)
return rq;
}
+
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+#include "selftest_context.c"
+#endif
diff --git a/drivers/gpu/drm/i915/gt/intel_context.h b/drivers/gpu/drm/i915/gt/intel_context.h
index a47275bc4f01..dd742ac2fbdb 100644
--- a/drivers/gpu/drm/i915/gt/intel_context.h
+++ b/drivers/gpu/drm/i915/gt/intel_context.h
@@ -9,12 +9,15 @@
#include <linux/lockdep.h>
+#include "i915_active.h"
#include "intel_context_types.h"
#include "intel_engine_types.h"
+#include "intel_timeline_types.h"
void intel_context_init(struct intel_context *ce,
struct i915_gem_context *ctx,
struct intel_engine_cs *engine);
+void intel_context_fini(struct intel_context *ce);
struct intel_context *
intel_context_create(struct i915_gem_context *ctx,
@@ -86,23 +89,26 @@ void intel_context_exit_engine(struct intel_context *ce);
static inline void intel_context_enter(struct intel_context *ce)
{
+ lockdep_assert_held(&ce->timeline->mutex);
if (!ce->active_count++)
ce->ops->enter(ce);
}
static inline void intel_context_mark_active(struct intel_context *ce)
{
+ lockdep_assert_held(&ce->timeline->mutex);
++ce->active_count;
}
static inline void intel_context_exit(struct intel_context *ce)
{
+ lockdep_assert_held(&ce->timeline->mutex);
GEM_BUG_ON(!ce->active_count);
if (!--ce->active_count)
ce->ops->exit(ce);
}
-int intel_context_active_acquire(struct intel_context *ce, unsigned long flags);
+int intel_context_active_acquire(struct intel_context *ce);
void intel_context_active_release(struct intel_context *ce);
static inline struct intel_context *intel_context_get(struct intel_context *ce)
@@ -116,19 +122,34 @@ static inline void intel_context_put(struct intel_context *ce)
kref_put(&ce->ref, ce->ops->destroy);
}
-static inline int __must_check
+static inline struct intel_timeline *__must_check
intel_context_timeline_lock(struct intel_context *ce)
- __acquires(&ce->ring->timeline->mutex)
+ __acquires(&ce->timeline->mutex)
{
- return mutex_lock_interruptible(&ce->ring->timeline->mutex);
+ struct intel_timeline *tl = ce->timeline;
+ int err;
+
+ err = mutex_lock_interruptible(&tl->mutex);
+ if (err)
+ return ERR_PTR(err);
+
+ return tl;
}
-static inline void intel_context_timeline_unlock(struct intel_context *ce)
- __releases(&ce->ring->timeline->mutex)
+static inline void intel_context_timeline_unlock(struct intel_timeline *tl)
+ __releases(&tl->mutex)
{
- mutex_unlock(&ce->ring->timeline->mutex);
+ mutex_unlock(&tl->mutex);
}
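+
+/*
+ * Typical usage of the timeline lock helpers (illustrative sketch only):
+ *
+ *	tl = intel_context_timeline_lock(ce);
+ *	if (IS_ERR(tl))
+ *		return PTR_ERR(tl);
+ *	... emit along ce->timeline ...
+ *	intel_context_timeline_unlock(tl);
+ *
+ * The unlock now takes the timeline returned by the lock rather than the
+ * context, as ce->timeline may be shared between contexts.
+ */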
+int intel_context_prepare_remote_request(struct intel_context *ce,
+ struct i915_request *rq);
+
struct i915_request *intel_context_create_request(struct intel_context *ce);
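+
+/*
+ * Until the backend's ->alloc() hook runs, ce->ring holds only a ring
+ * size encoded as a pointer (e.g. __intel_context_ring_size(SZ_16K) set
+ * by intel_context_init()), not yet a real struct intel_ring.
+ */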
+static inline struct intel_ring *__intel_context_ring_size(u64 sz)
+{
+ return u64_to_ptr(struct intel_ring, sz);
+}
+
#endif /* __INTEL_CONTEXT_H__ */
diff --git a/drivers/gpu/drm/i915/gt/intel_context_types.h b/drivers/gpu/drm/i915/gt/intel_context_types.h
index 08049ee91cee..bf9cedfccbf0 100644
--- a/drivers/gpu/drm/i915/gt/intel_context_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_context_types.h
@@ -13,6 +13,7 @@
#include <linux/types.h>
#include "i915_active_types.h"
+#include "i915_utils.h"
#include "intel_engine_types.h"
#include "intel_sseu.h"
@@ -22,6 +23,8 @@ struct intel_context;
struct intel_ring;
struct intel_context_ops {
+ int (*alloc)(struct intel_context *ce);
+
int (*pin)(struct intel_context *ce);
void (*unpin)(struct intel_context *ce);
@@ -35,20 +38,28 @@ struct intel_context_ops {
struct intel_context {
struct kref ref;
- struct i915_gem_context *gem_context;
struct intel_engine_cs *engine;
struct intel_engine_cs *inflight;
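+ /*
+ * The low two bits of the inflight pointer double as a submission
+ * count: intel_context_inflight() recovers the engine pointer,
+ * intel_context_inflight_count() the count (see ptr_mask_bits()).
+ */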
+#define intel_context_inflight(ce) ptr_mask_bits((ce)->inflight, 2)
+#define intel_context_inflight_count(ce) ptr_unmask_bits((ce)->inflight, 2)
+
+ struct i915_address_space *vm;
+ struct i915_gem_context *gem_context;
struct list_head signal_link;
struct list_head signals;
struct i915_vma *state;
struct intel_ring *ring;
+ struct intel_timeline *timeline;
+
+ unsigned long flags;
+#define CONTEXT_ALLOC_BIT 0
u32 *lrc_reg_state;
u64 lrc_desc;
- unsigned int active_count; /* notionally protected by timeline->mutex */
+ unsigned int active_count; /* protected by timeline->mutex */
atomic_t pin_count;
struct mutex pin_mutex; /* guards pinning and associated on-gpuing */
diff --git a/drivers/gpu/drm/i915/gt/intel_engine.h b/drivers/gpu/drm/i915/gt/intel_engine.h
index 2f1c6871ee95..d3c6993f4f46 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine.h
@@ -9,12 +9,11 @@
#include <linux/random.h>
#include <linux/seqlock.h>
-#include "i915_gem_batch_pool.h"
#include "i915_pmu.h"
#include "i915_reg.h"
#include "i915_request.h"
#include "i915_selftest.h"
-#include "i915_timeline.h"
+#include "gt/intel_timeline.h"
#include "intel_engine_types.h"
#include "intel_gpu_commands.h"
#include "intel_workarounds.h"
@@ -51,7 +50,7 @@ struct drm_printer;
#define ENGINE_READ16(...) __ENGINE_READ_OP(read16, __VA_ARGS__)
#define ENGINE_READ(...) __ENGINE_READ_OP(read, __VA_ARGS__)
#define ENGINE_READ_FW(...) __ENGINE_READ_OP(read_fw, __VA_ARGS__)
-#define ENGINE_POSTING_READ(...) __ENGINE_READ_OP(posting_read, __VA_ARGS__)
+#define ENGINE_POSTING_READ(...) __ENGINE_READ_OP(posting_read_fw, __VA_ARGS__)
#define ENGINE_POSTING_READ16(...) __ENGINE_READ_OP(posting_read16, __VA_ARGS__)
#define ENGINE_READ64(engine__, lower_reg__, upper_reg__) \
@@ -123,73 +122,23 @@ hangcheck_action_to_str(const enum intel_engine_hangcheck_action a)
return "unknown";
}
-void intel_engines_set_scheduler_caps(struct drm_i915_private *i915);
-
-static inline void
-execlists_set_active(struct intel_engine_execlists *execlists,
- unsigned int bit)
-{
- __set_bit(bit, (unsigned long *)&execlists->active);
-}
-
-static inline bool
-execlists_set_active_once(struct intel_engine_execlists *execlists,
- unsigned int bit)
-{
- return !__test_and_set_bit(bit, (unsigned long *)&execlists->active);
-}
-
-static inline void
-execlists_clear_active(struct intel_engine_execlists *execlists,
- unsigned int bit)
-{
- __clear_bit(bit, (unsigned long *)&execlists->active);
-}
-
-static inline void
-execlists_clear_all_active(struct intel_engine_execlists *execlists)
-{
- execlists->active = 0;
-}
-
-static inline bool
-execlists_is_active(const struct intel_engine_execlists *execlists,
- unsigned int bit)
-{
- return test_bit(bit, (unsigned long *)&execlists->active);
-}
-
-void execlists_user_begin(struct intel_engine_execlists *execlists,
- const struct execlist_port *port);
-void execlists_user_end(struct intel_engine_execlists *execlists);
-
-void
-execlists_cancel_port_requests(struct intel_engine_execlists * const execlists);
-
-struct i915_request *
-execlists_unwind_incomplete_requests(struct intel_engine_execlists *execlists);
-
static inline unsigned int
execlists_num_ports(const struct intel_engine_execlists * const execlists)
{
return execlists->port_mask + 1;
}
-static inline struct execlist_port *
-execlists_port_complete(struct intel_engine_execlists * const execlists,
- struct execlist_port * const port)
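+/*
+ * execlists->active walks the inflight[] array and points at the request
+ * currently executing on HW (NULL once the engine idles), while the
+ * pending[] array holds the submission still awaiting acknowledgement
+ * from the HW.
+ */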
+static inline struct i915_request *
+execlists_active(const struct intel_engine_execlists *execlists)
{
- const unsigned int m = execlists->port_mask;
-
- GEM_BUG_ON(port_index(port, execlists) != 0);
- GEM_BUG_ON(!execlists_is_active(execlists, EXECLISTS_ACTIVE_USER));
-
- memmove(port, port + 1, m * sizeof(struct execlist_port));
- memset(port + m, 0, sizeof(struct execlist_port));
-
- return port;
+ GEM_BUG_ON(execlists->active - execlists->inflight >
+ execlists_num_ports(execlists));
+ return READ_ONCE(*execlists->active);
}
+struct i915_request *
+execlists_unwind_incomplete_requests(struct intel_engine_execlists *execlists);
+
static inline u32
intel_read_status_page(const struct intel_engine_cs *engine, int reg)
{
@@ -244,9 +193,7 @@ intel_write_status_page(struct intel_engine_cs *engine, int reg, u32 value)
#define CNL_HWS_CSB_WRITE_INDEX 0x2f
struct intel_ring *
-intel_engine_create_ring(struct intel_engine_cs *engine,
- struct i915_timeline *timeline,
- int size);
+intel_engine_create_ring(struct intel_engine_cs *engine, int size);
int intel_ring_pin(struct intel_ring *ring);
void intel_ring_reset(struct intel_ring *ring, u32 tail);
unsigned int intel_ring_update_space(struct intel_ring *ring);
@@ -388,9 +335,6 @@ void intel_engine_init_execlists(struct intel_engine_cs *engine);
void intel_engine_init_breadcrumbs(struct intel_engine_cs *engine);
void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine);
-void intel_engine_pin_breadcrumbs_irq(struct intel_engine_cs *engine);
-void intel_engine_unpin_breadcrumbs_irq(struct intel_engine_cs *engine);
-
void intel_engine_signal_breadcrumbs(struct intel_engine_cs *engine);
void intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine);
@@ -456,8 +400,8 @@ gen8_emit_ggtt_write(u32 *cs, u32 value, u32 gtt_offset, u32 flags)
return cs;
}
-static inline void intel_engine_reset(struct intel_engine_cs *engine,
- bool stalled)
+static inline void __intel_engine_reset(struct intel_engine_cs *engine,
+ bool stalled)
{
if (engine->reset.reset)
engine->reset.reset(engine, stalled);
@@ -465,10 +409,9 @@ static inline void intel_engine_reset(struct intel_engine_cs *engine,
}
bool intel_engine_is_idle(struct intel_engine_cs *engine);
-bool intel_engines_are_idle(struct drm_i915_private *dev_priv);
+bool intel_engines_are_idle(struct intel_gt *gt);
-void intel_engines_reset_default_submission(struct drm_i915_private *i915);
-unsigned int intel_engines_has_context_isolation(struct drm_i915_private *i915);
+void intel_engines_reset_default_submission(struct intel_gt *gt);
bool intel_engine_can_store_dword(struct intel_engine_cs *engine);
@@ -477,9 +420,6 @@ void intel_engine_dump(struct intel_engine_cs *engine,
struct drm_printer *m,
const char *header, ...);
-struct intel_engine_cs *
-intel_engine_lookup_user(struct drm_i915_private *i915, u8 class, u8 instance);
-
static inline void intel_engine_context_in(struct intel_engine_cs *engine)
{
unsigned long flags;
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
index f25632c9b292..82630db0394b 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
@@ -28,8 +28,12 @@
#include "i915_drv.h"
+#include "gt/intel_gt.h"
+
#include "intel_engine.h"
#include "intel_engine_pm.h"
+#include "intel_engine_pool.h"
+#include "intel_engine_user.h"
#include "intel_context.h"
#include "intel_lrc.h"
#include "intel_reset.h"
@@ -51,30 +55,6 @@
#define GEN8_LR_CONTEXT_OTHER_SIZE ( 2 * PAGE_SIZE)
-struct engine_class_info {
- const char *name;
- u8 uabi_class;
-};
-
-static const struct engine_class_info intel_engine_classes[] = {
- [RENDER_CLASS] = {
- .name = "rcs",
- .uabi_class = I915_ENGINE_CLASS_RENDER,
- },
- [COPY_ENGINE_CLASS] = {
- .name = "bcs",
- .uabi_class = I915_ENGINE_CLASS_COPY,
- },
- [VIDEO_DECODE_CLASS] = {
- .name = "vcs",
- .uabi_class = I915_ENGINE_CLASS_VIDEO,
- },
- [VIDEO_ENHANCEMENT_CLASS] = {
- .name = "vecs",
- .uabi_class = I915_ENGINE_CLASS_VIDEO_ENHANCE,
- },
-};
-
#define MAX_MMIO_BASES 3
struct engine_info {
unsigned int hw_id;
@@ -184,6 +164,7 @@ u32 intel_engine_context_size(struct drm_i915_private *dev_priv, u8 class)
default:
MISSING_CASE(INTEL_GEN(dev_priv));
return DEFAULT_LR_CONTEXT_RENDER_SIZE;
+ case 12:
case 11:
return GEN11_LR_CONTEXT_RENDER_SIZE;
case 10:
@@ -255,11 +236,16 @@ static u32 __engine_mmio_base(struct drm_i915_private *i915,
return bases[i].base;
}
-static void __sprint_engine_name(char *name, const struct engine_info *info)
+static void __sprint_engine_name(struct intel_engine_cs *engine)
{
- WARN_ON(snprintf(name, INTEL_ENGINE_CS_MAX_NAME, "%s%u",
- intel_engine_classes[info->class].name,
- info->instance) >= INTEL_ENGINE_CS_MAX_NAME);
+ /*
+ * Before we know what the uABI name for this engine will be,
+ * we still would like to keep track of this engine in the debug logs.
+ * We throw in a ' here as a reminder that this isn't its final name.
+ */
+ GEM_WARN_ON(snprintf(engine->name, sizeof(engine->name), "%s'%u",
+ intel_engine_class_repr(engine->class),
+ engine->instance) >= sizeof(engine->name));
}
void intel_engine_set_hwsp_writemask(struct intel_engine_cs *engine, u32 mask)
@@ -283,15 +269,11 @@ static void intel_engine_sanitize_mmio(struct intel_engine_cs *engine)
intel_engine_set_hwsp_writemask(engine, ~0u);
}
-static int
-intel_engine_setup(struct drm_i915_private *dev_priv,
- enum intel_engine_id id)
+static int intel_engine_setup(struct intel_gt *gt, enum intel_engine_id id)
{
const struct engine_info *info = &intel_engines[id];
struct intel_engine_cs *engine;
- GEM_BUG_ON(info->class >= ARRAY_SIZE(intel_engine_classes));
-
BUILD_BUG_ON(MAX_ENGINE_CLASS >= BIT(GEN11_ENGINE_CLASS_WIDTH));
BUILD_BUG_ON(MAX_ENGINE_INSTANCE >= BIT(GEN11_ENGINE_INSTANCE_WIDTH));
@@ -301,10 +283,9 @@ intel_engine_setup(struct drm_i915_private *dev_priv,
if (GEM_DEBUG_WARN_ON(info->instance > MAX_ENGINE_INSTANCE))
return -EINVAL;
- if (GEM_DEBUG_WARN_ON(dev_priv->engine_class[info->class][info->instance]))
+ if (GEM_DEBUG_WARN_ON(gt->engine_class[info->class][info->instance]))
return -EINVAL;
- GEM_BUG_ON(dev_priv->engine[id]);
engine = kzalloc(sizeof(*engine), GFP_KERNEL);
if (!engine)
return -ENOMEM;
@@ -313,13 +294,15 @@ intel_engine_setup(struct drm_i915_private *dev_priv,
engine->id = id;
engine->mask = BIT(id);
- engine->i915 = dev_priv;
- engine->uncore = &dev_priv->uncore;
- __sprint_engine_name(engine->name, info);
+ engine->i915 = gt->i915;
+ engine->gt = gt;
+ engine->uncore = gt->uncore;
engine->hw_id = engine->guc_id = info->hw_id;
- engine->mmio_base = __engine_mmio_base(dev_priv, info->mmio_bases);
+ engine->mmio_base = __engine_mmio_base(gt->i915, info->mmio_bases);
+
engine->class = info->class;
engine->instance = info->instance;
+ __sprint_engine_name(engine);
/*
* To be overridden by the backend on setup. However to facilitate
@@ -327,14 +310,12 @@ intel_engine_setup(struct drm_i915_private *dev_priv,
*/
engine->destroy = (typeof(engine->destroy))kfree;
- engine->uabi_class = intel_engine_classes[info->class].uabi_class;
-
- engine->context_size = intel_engine_context_size(dev_priv,
+ engine->context_size = intel_engine_context_size(gt->i915,
engine->class);
if (WARN_ON(engine->context_size > BIT(20)))
engine->context_size = 0;
if (engine->context_size)
- DRIVER_CAPS(dev_priv)->has_logical_contexts = true;
+ DRIVER_CAPS(gt->i915)->has_logical_contexts = true;
/* Nothing to do here, execute in order of dependencies */
engine->schedule = NULL;
@@ -346,8 +327,11 @@ intel_engine_setup(struct drm_i915_private *dev_priv,
/* Scrub mmio state on takeover */
intel_engine_sanitize_mmio(engine);
- dev_priv->engine_class[info->class][info->instance] = engine;
- dev_priv->engine[id] = engine;
+ gt->engine_class[info->class][info->instance] = engine;
+
+ intel_engine_add_user(engine);
+ gt->i915->engine[id] = engine;
+
return 0;
}
@@ -423,14 +407,14 @@ int intel_engines_init_mmio(struct drm_i915_private *i915)
WARN_ON(engine_mask &
GENMASK(BITS_PER_TYPE(mask) - 1, I915_NUM_ENGINES));
- if (i915_inject_load_failure())
+ if (i915_inject_probe_failure(i915))
return -ENODEV;
for (i = 0; i < ARRAY_SIZE(intel_engines); i++) {
if (!HAS_ENGINE(i915, i))
continue;
- err = intel_engine_setup(i915, i);
+ err = intel_engine_setup(&i915->gt, i);
if (err)
goto cleanup;
@@ -445,15 +429,9 @@ int intel_engines_init_mmio(struct drm_i915_private *i915)
if (WARN_ON(mask != engine_mask))
device_info->engine_mask = mask;
- /* We always presume we have at least RCS available for later probing */
- if (WARN_ON(!HAS_ENGINE(i915, RCS0))) {
- err = -ENODEV;
- goto cleanup;
- }
-
RUNTIME_INFO(i915)->num_engines = hweight32(mask);
- i915_check_and_clear_faults(i915);
+ intel_gt_check_and_clear_faults(&i915->gt);
intel_setup_engine_capabilities(i915);
@@ -495,11 +473,6 @@ cleanup:
return err;
}
-static void intel_engine_init_batch_pool(struct intel_engine_cs *engine)
-{
- i915_gem_batch_pool_init(&engine->batch_pool, engine);
-}
-
void intel_engine_init_execlists(struct intel_engine_cs *engine)
{
struct intel_engine_execlists * const execlists = &engine->execlists;
@@ -508,6 +481,10 @@ void intel_engine_init_execlists(struct intel_engine_cs *engine)
GEM_BUG_ON(!is_power_of_2(execlists_num_ports(execlists)));
GEM_BUG_ON(execlists_num_ports(execlists) > EXECLIST_MAX_PORTS);
+ memset(execlists->pending, 0, sizeof(execlists->pending));
+ execlists->active =
+ memset(execlists->inflight, 0, sizeof(execlists->inflight));
+
execlists->queue_priority_hint = INT_MIN;
execlists->queue = RB_ROOT_CACHED;
}
@@ -577,7 +554,7 @@ static int init_status_page(struct intel_engine_cs *engine)
i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC);
- vma = i915_vma_instance(obj, &engine->i915->ggtt.vm, NULL);
+ vma = i915_vma_instance(obj, &engine->gt->ggtt->vm, NULL);
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
goto err;
@@ -621,14 +598,19 @@ static int intel_engine_setup_common(struct intel_engine_cs *engine)
intel_engine_init_breadcrumbs(engine);
intel_engine_init_execlists(engine);
intel_engine_init_hangcheck(engine);
- intel_engine_init_batch_pool(engine);
intel_engine_init_cmd_parser(engine);
intel_engine_init__pm(engine);
+ intel_engine_pool_init(&engine->pool);
+
/* Use the whole device by default */
engine->sseu =
intel_sseu_from_device_info(&RUNTIME_INFO(engine->i915)->sseu);
+ intel_engine_init_workarounds(engine);
+ intel_engine_init_whitelist(engine);
+ intel_engine_init_ctx_wa(engine);
+
return 0;
}
@@ -675,49 +657,9 @@ cleanup:
return err;
}
-void intel_engines_set_scheduler_caps(struct drm_i915_private *i915)
-{
- static const struct {
- u8 engine;
- u8 sched;
- } map[] = {
-#define MAP(x, y) { ilog2(I915_ENGINE_HAS_##x), ilog2(I915_SCHEDULER_CAP_##y) }
- MAP(PREEMPTION, PREEMPTION),
- MAP(SEMAPHORES, SEMAPHORES),
-#undef MAP
- };
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
- u32 enabled, disabled;
-
- enabled = 0;
- disabled = 0;
- for_each_engine(engine, i915, id) { /* all engines must agree! */
- int i;
-
- if (engine->schedule)
- enabled |= (I915_SCHEDULER_CAP_ENABLED |
- I915_SCHEDULER_CAP_PRIORITY);
- else
- disabled |= (I915_SCHEDULER_CAP_ENABLED |
- I915_SCHEDULER_CAP_PRIORITY);
-
- for (i = 0; i < ARRAY_SIZE(map); i++) {
- if (engine->flags & BIT(map[i].engine))
- enabled |= BIT(map[i].sched);
- else
- disabled |= BIT(map[i].sched);
- }
- }
-
- i915->caps.scheduler = enabled & ~disabled;
- if (!(i915->caps.scheduler & I915_SCHEDULER_CAP_ENABLED))
- i915->caps.scheduler = 0;
-}
-
struct measure_breadcrumb {
struct i915_request rq;
- struct i915_timeline timeline;
+ struct intel_timeline timeline;
struct intel_ring ring;
u32 cs[1024];
};
@@ -727,19 +669,17 @@ static int measure_breadcrumb_dw(struct intel_engine_cs *engine)
struct measure_breadcrumb *frame;
int dw = -ENOMEM;
- GEM_BUG_ON(!engine->i915->gt.scratch);
+ GEM_BUG_ON(!engine->gt->scratch);
frame = kzalloc(sizeof(*frame), GFP_KERNEL);
if (!frame)
return -ENOMEM;
- if (i915_timeline_init(engine->i915,
- &frame->timeline,
- engine->status_page.vma))
+ if (intel_timeline_init(&frame->timeline,
+ engine->gt,
+ engine->status_page.vma))
goto out_frame;
- INIT_LIST_HEAD(&frame->ring.request_list);
- frame->ring.timeline = &frame->timeline;
frame->ring.vaddr = frame->cs;
frame->ring.size = sizeof(frame->cs);
frame->ring.effective_size = frame->ring.size;
@@ -750,42 +690,22 @@ static int measure_breadcrumb_dw(struct intel_engine_cs *engine)
frame->rq.ring = &frame->ring;
frame->rq.timeline = &frame->timeline;
- dw = i915_timeline_pin(&frame->timeline);
+ dw = intel_timeline_pin(&frame->timeline);
if (dw < 0)
goto out_timeline;
dw = engine->emit_fini_breadcrumb(&frame->rq, frame->cs) - frame->cs;
GEM_BUG_ON(dw & 1); /* RING_TAIL must be qword aligned */
- i915_timeline_unpin(&frame->timeline);
+ intel_timeline_unpin(&frame->timeline);
out_timeline:
- i915_timeline_fini(&frame->timeline);
+ intel_timeline_fini(&frame->timeline);
out_frame:
kfree(frame);
return dw;
}
-static int pin_context(struct i915_gem_context *ctx,
- struct intel_engine_cs *engine,
- struct intel_context **out)
-{
- struct intel_context *ce;
- int err;
-
- ce = i915_gem_context_get_engine(ctx, engine->id);
- if (IS_ERR(ce))
- return PTR_ERR(ce);
-
- err = intel_context_pin(ce);
- intel_context_put(ce);
- if (err)
- return err;
-
- *out = ce;
- return 0;
-}
-
void
intel_engine_init_active(struct intel_engine_cs *engine, unsigned int subclass)
{
@@ -807,6 +727,27 @@ intel_engine_init_active(struct intel_engine_cs *engine, unsigned int subclass)
#endif
}
+static struct intel_context *
+create_kernel_context(struct intel_engine_cs *engine)
+{
+ struct intel_context *ce;
+ int err;
+
+ ce = intel_context_create(engine->i915->kernel_context, engine);
+ if (IS_ERR(ce))
+ return ce;
+
+ ce->ring = __intel_context_ring_size(SZ_4K);
+
+ err = intel_context_pin(ce);
+ if (err) {
+ intel_context_put(ce);
+ return ERR_PTR(err);
+ }
+
+ return ce;
+}
+
/**
* intel_engine_init_common - initialize engine state which might require hw access
* @engine: Engine to initialize.
@@ -820,29 +761,24 @@ intel_engine_init_active(struct intel_engine_cs *engine, unsigned int subclass)
*/
int intel_engine_init_common(struct intel_engine_cs *engine)
{
- struct drm_i915_private *i915 = engine->i915;
+ struct intel_context *ce;
int ret;
- /* We may need to do things with the shrinker which
+ engine->set_default_submission(engine);
+
+ /*
+ * We may need to do things with the shrinker which
* require us to immediately switch back to the default
* context. This can cause a problem as pinning the
* default context also requires GTT space which may not
* be available. To avoid this we always pin the default
* context.
*/
- ret = pin_context(i915->kernel_context, engine,
- &engine->kernel_context);
- if (ret)
- return ret;
+ ce = create_kernel_context(engine);
+ if (IS_ERR(ce))
+ return PTR_ERR(ce);
- /*
- * Similarly the preempt context must always be available so that
- * we can interrupt the engine at any time. However, as preemption
- * is optional, we allow it to fail.
- */
- if (i915->preempt_context)
- pin_context(i915->preempt_context, engine,
- &engine->preempt_context);
+ engine->kernel_context = ce;
ret = measure_breadcrumb_dw(engine);
if (ret < 0)
@@ -850,14 +786,11 @@ int intel_engine_init_common(struct intel_engine_cs *engine)
engine->emit_fini_breadcrumb_dw = ret;
- engine->set_default_submission(engine);
-
return 0;
err_unpin:
- if (engine->preempt_context)
- intel_context_unpin(engine->preempt_context);
- intel_context_unpin(engine->kernel_context);
+ intel_context_unpin(ce);
+ intel_context_put(ce);
return ret;
}
@@ -874,16 +807,15 @@ void intel_engine_cleanup_common(struct intel_engine_cs *engine)
cleanup_status_page(engine);
+ intel_engine_pool_fini(&engine->pool);
intel_engine_fini_breadcrumbs(engine);
intel_engine_cleanup_cmd_parser(engine);
- i915_gem_batch_pool_fini(&engine->batch_pool);
if (engine->default_state)
i915_gem_object_put(engine->default_state);
- if (engine->preempt_context)
- intel_context_unpin(engine->preempt_context);
intel_context_unpin(engine->kernel_context);
+ intel_context_put(engine->kernel_context);
GEM_BUG_ON(!llist_empty(&engine->barrier_tasks));
intel_wa_list_free(&engine->ctx_wa_list);
@@ -966,57 +898,23 @@ const char *i915_cache_level_str(struct drm_i915_private *i915, int type)
}
}
-u32 intel_calculate_mcr_s_ss_select(struct drm_i915_private *dev_priv)
-{
- const struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
- unsigned int slice = fls(sseu->slice_mask) - 1;
- unsigned int subslice;
- u32 mcr_s_ss_select;
-
- GEM_BUG_ON(slice >= ARRAY_SIZE(sseu->subslice_mask));
- subslice = fls(sseu->subslice_mask[slice]);
- GEM_BUG_ON(!subslice);
- subslice--;
-
- if (IS_GEN(dev_priv, 10))
- mcr_s_ss_select = GEN8_MCR_SLICE(slice) |
- GEN8_MCR_SUBSLICE(subslice);
- else if (INTEL_GEN(dev_priv) >= 11)
- mcr_s_ss_select = GEN11_MCR_SLICE(slice) |
- GEN11_MCR_SUBSLICE(subslice);
- else
- mcr_s_ss_select = 0;
-
- return mcr_s_ss_select;
-}
-
static u32
read_subslice_reg(struct intel_engine_cs *engine, int slice, int subslice,
i915_reg_t reg)
{
struct drm_i915_private *i915 = engine->i915;
struct intel_uncore *uncore = engine->uncore;
- u32 mcr_slice_subslice_mask;
- u32 mcr_slice_subslice_select;
- u32 default_mcr_s_ss_select;
- u32 mcr;
- u32 ret;
+ u32 mcr_mask, mcr_ss, mcr, old_mcr, val;
enum forcewake_domains fw_domains;
if (INTEL_GEN(i915) >= 11) {
- mcr_slice_subslice_mask = GEN11_MCR_SLICE_MASK |
- GEN11_MCR_SUBSLICE_MASK;
- mcr_slice_subslice_select = GEN11_MCR_SLICE(slice) |
- GEN11_MCR_SUBSLICE(subslice);
+ mcr_mask = GEN11_MCR_SLICE_MASK | GEN11_MCR_SUBSLICE_MASK;
+ mcr_ss = GEN11_MCR_SLICE(slice) | GEN11_MCR_SUBSLICE(subslice);
} else {
- mcr_slice_subslice_mask = GEN8_MCR_SLICE_MASK |
- GEN8_MCR_SUBSLICE_MASK;
- mcr_slice_subslice_select = GEN8_MCR_SLICE(slice) |
- GEN8_MCR_SUBSLICE(subslice);
+ mcr_mask = GEN8_MCR_SLICE_MASK | GEN8_MCR_SUBSLICE_MASK;
+ mcr_ss = GEN8_MCR_SLICE(slice) | GEN8_MCR_SUBSLICE(subslice);
}
- default_mcr_s_ss_select = intel_calculate_mcr_s_ss_select(i915);
-
fw_domains = intel_uncore_forcewake_for_reg(uncore, reg,
FW_REG_READ);
fw_domains |= intel_uncore_forcewake_for_reg(uncore,
@@ -1026,26 +924,23 @@ read_subslice_reg(struct intel_engine_cs *engine, int slice, int subslice,
spin_lock_irq(&uncore->lock);
intel_uncore_forcewake_get__locked(uncore, fw_domains);
- mcr = intel_uncore_read_fw(uncore, GEN8_MCR_SELECTOR);
-
- WARN_ON_ONCE((mcr & mcr_slice_subslice_mask) !=
- default_mcr_s_ss_select);
+ old_mcr = mcr = intel_uncore_read_fw(uncore, GEN8_MCR_SELECTOR);
- mcr &= ~mcr_slice_subslice_mask;
- mcr |= mcr_slice_subslice_select;
+ mcr &= ~mcr_mask;
+ mcr |= mcr_ss;
intel_uncore_write_fw(uncore, GEN8_MCR_SELECTOR, mcr);
- ret = intel_uncore_read_fw(uncore, reg);
+ val = intel_uncore_read_fw(uncore, reg);
- mcr &= ~mcr_slice_subslice_mask;
- mcr |= default_mcr_s_ss_select;
+ mcr &= ~mcr_mask;
+ mcr |= old_mcr & mcr_mask;
intel_uncore_write_fw(uncore, GEN8_MCR_SELECTOR, mcr);
intel_uncore_forcewake_put__locked(uncore, fw_domains);
spin_unlock_irq(&uncore->lock);
- return ret;
+ return val;
}
/* NB: please notice the memset */
@@ -1113,16 +1008,12 @@ void intel_engine_get_instdone(struct intel_engine_cs *engine,
static bool ring_is_idle(struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv = engine->i915;
- intel_wakeref_t wakeref;
bool idle = true;
if (I915_SELFTEST_ONLY(!engine->mmio_base))
return true;
- /* If the whole device is asleep, the engine must be idle */
- wakeref = intel_runtime_pm_get_if_in_use(&dev_priv->runtime_pm);
- if (!wakeref)
+ if (!intel_engine_pm_get_if_awake(engine))
return true;
/* First check that no commands are left in the ring */
@@ -1131,11 +1022,11 @@ static bool ring_is_idle(struct intel_engine_cs *engine)
idle = false;
/* No bit for gen2, so assume the CS parser is idle */
- if (INTEL_GEN(dev_priv) > 2 &&
+ if (INTEL_GEN(engine->i915) > 2 &&
!(ENGINE_READ(engine, RING_MI_MODE) & MODE_IDLE))
idle = false;
- intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
+ intel_engine_pm_put(engine);
return idle;
}
@@ -1150,17 +1041,17 @@ static bool ring_is_idle(struct intel_engine_cs *engine)
bool intel_engine_is_idle(struct intel_engine_cs *engine)
{
/* More white lies, if wedged, hw state is inconsistent */
- if (i915_reset_failed(engine->i915))
+ if (intel_gt_is_wedged(engine->gt))
return true;
- if (!intel_wakeref_active(&engine->wakeref))
+ if (!intel_engine_pm_is_awake(engine))
return true;
/* Waiting to drain ELSP? */
- if (READ_ONCE(engine->execlists.active)) {
+ if (execlists_active(&engine->execlists)) {
struct tasklet_struct *t = &engine->execlists.tasklet;
- synchronize_hardirq(engine->i915->drm.irq);
+ synchronize_hardirq(engine->i915->drm.pdev->irq);
local_bh_disable();
if (tasklet_trylock(t)) {
@@ -1174,7 +1065,7 @@ bool intel_engine_is_idle(struct intel_engine_cs *engine)
/* Otherwise flush the tasklet if it was on another cpu */
tasklet_unlock_wait(t);
- if (READ_ONCE(engine->execlists.active))
+ if (execlists_active(&engine->execlists))
return false;
}
@@ -1186,7 +1077,7 @@ bool intel_engine_is_idle(struct intel_engine_cs *engine)
return ring_is_idle(engine);
}
-bool intel_engines_are_idle(struct drm_i915_private *i915)
+bool intel_engines_are_idle(struct intel_gt *gt)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
@@ -1195,14 +1086,14 @@ bool intel_engines_are_idle(struct drm_i915_private *i915)
* If the driver is wedged, HW state may be very inconsistent and
* report that it is still busy, even though we have stopped using it.
*/
- if (i915_reset_failed(i915))
+ if (intel_gt_is_wedged(gt))
return true;
/* Already parked (and passed an idleness test); must still be idle */
- if (!READ_ONCE(i915->gt.awake))
+ if (!READ_ONCE(gt->awake))
return true;
- for_each_engine(engine, i915, id) {
+ for_each_engine(engine, gt->i915, id) {
if (!intel_engine_is_idle(engine))
return false;
}
@@ -1210,12 +1101,12 @@ bool intel_engines_are_idle(struct drm_i915_private *i915)
return true;
}
-void intel_engines_reset_default_submission(struct drm_i915_private *i915)
+void intel_engines_reset_default_submission(struct intel_gt *gt)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
- for_each_engine(engine, i915, id)
+ for_each_engine(engine, gt->i915, id)
engine->set_default_submission(engine);
}
@@ -1234,20 +1125,6 @@ bool intel_engine_can_store_dword(struct intel_engine_cs *engine)
}
}
-unsigned int intel_engines_has_context_isolation(struct drm_i915_private *i915)
-{
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
- unsigned int which;
-
- which = 0;
- for_each_engine(engine, i915, id)
- if (engine->default_state)
- which |= BIT(engine->uabi_class);
-
- return which;
-}
-
static int print_sched_attr(struct drm_i915_private *i915,
const struct i915_sched_attr *attr,
char *buf, int x, int len)
@@ -1325,7 +1202,7 @@ static void intel_engine_print_registers(struct intel_engine_cs *engine,
unsigned long flags;
u64 addr;
- if (engine->id == RCS0 && IS_GEN_RANGE(dev_priv, 4, 7))
+ if (engine->id == RENDER_CLASS && IS_GEN_RANGE(dev_priv, 4, 7))
drm_printf(m, "\tCCID: 0x%08x\n", ENGINE_READ(engine, CCID));
drm_printf(m, "\tRING_START: 0x%08x\n",
ENGINE_READ(engine, RING_START));
@@ -1372,6 +1249,7 @@ static void intel_engine_print_registers(struct intel_engine_cs *engine,
}
if (HAS_EXECLISTS(dev_priv)) {
+ struct i915_request * const *port, *rq;
const u32 *hws =
&engine->status_page.addr[I915_HWS_CSB_BUF0_INDEX];
const u8 num_entries = execlists->csb_size;
@@ -1404,27 +1282,33 @@ static void intel_engine_print_registers(struct intel_engine_cs *engine,
}
spin_lock_irqsave(&engine->active.lock, flags);
- for (idx = 0; idx < execlists_num_ports(execlists); idx++) {
- struct i915_request *rq;
- unsigned int count;
+ for (port = execlists->active; (rq = *port); port++) {
+ char hdr[80];
+ int len;
+
+ len = snprintf(hdr, sizeof(hdr),
+ "\t\tActive[%d: ",
+ (int)(port - execlists->active));
+ if (!i915_request_signaled(rq))
+ len += snprintf(hdr + len, sizeof(hdr) - len,
+ "ring:{start:%08x, hwsp:%08x, seqno:%08x}, ",
+ i915_ggtt_offset(rq->ring->vma),
+ rq->timeline->hwsp_offset,
+ hwsp_seqno(rq));
+ snprintf(hdr + len, sizeof(hdr) - len, "rq: ");
+ print_request(m, rq, hdr);
+ }
+ for (port = execlists->pending; (rq = *port); port++) {
char hdr[80];
- rq = port_unpack(&execlists->port[idx], &count);
- if (!rq) {
- drm_printf(m, "\t\tELSP[%d] idle\n", idx);
- } else if (!i915_request_signaled(rq)) {
- snprintf(hdr, sizeof(hdr),
- "\t\tELSP[%d] count=%d, ring:{start:%08x, hwsp:%08x, seqno:%08x}, rq: ",
- idx, count,
- i915_ggtt_offset(rq->ring->vma),
- rq->timeline->hwsp_offset,
- hwsp_seqno(rq));
- print_request(m, rq, hdr);
- } else {
- print_request(m, rq, "\t\tELSP[%d] rq: ");
- }
+ snprintf(hdr, sizeof(hdr),
+ "\t\tPending[%d] ring:{start:%08x, hwsp:%08x, seqno:%08x}, rq: ",
+ (int)(port - execlists->pending),
+ i915_ggtt_offset(rq->ring->vma),
+ rq->timeline->hwsp_offset,
+ hwsp_seqno(rq));
+ print_request(m, rq, hdr);
}
- drm_printf(m, "\t\tHW active? 0x%x\n", execlists->active);
spin_unlock_irqrestore(&engine->active.lock, flags);
} else if (INTEL_GEN(dev_priv) > 6) {
drm_printf(m, "\tPP_DIR_BASE: 0x%08x\n",
@@ -1486,7 +1370,7 @@ void intel_engine_dump(struct intel_engine_cs *engine,
va_end(ap);
}
- if (i915_reset_failed(engine->i915))
+ if (intel_gt_is_wedged(engine->gt))
drm_printf(m, "*** WEDGED ***\n");
drm_printf(m, "\tAwake? %d\n", atomic_read(&engine->wakeref.count));
@@ -1520,6 +1404,7 @@ void intel_engine_dump(struct intel_engine_cs *engine,
}
spin_unlock_irqrestore(&engine->active.lock, flags);
+ drm_printf(m, "\tMMIO base: 0x%08x\n", engine->mmio_base);
wakeref = intel_runtime_pm_get_if_in_use(&engine->i915->runtime_pm);
if (wakeref) {
intel_engine_print_registers(engine, m);
@@ -1538,29 +1423,6 @@ void intel_engine_dump(struct intel_engine_cs *engine,
intel_engine_print_breadcrumbs(engine, m);
}
-static u8 user_class_map[] = {
- [I915_ENGINE_CLASS_RENDER] = RENDER_CLASS,
- [I915_ENGINE_CLASS_COPY] = COPY_ENGINE_CLASS,
- [I915_ENGINE_CLASS_VIDEO] = VIDEO_DECODE_CLASS,
- [I915_ENGINE_CLASS_VIDEO_ENHANCE] = VIDEO_ENHANCEMENT_CLASS,
-};
-
-struct intel_engine_cs *
-intel_engine_lookup_user(struct drm_i915_private *i915, u8 class, u8 instance)
-{
- if (class >= ARRAY_SIZE(user_class_map))
- return NULL;
-
- class = user_class_map[class];
-
- GEM_BUG_ON(class > MAX_ENGINE_CLASS);
-
- if (instance > MAX_ENGINE_INSTANCE)
- return NULL;
-
- return i915->engine_class[class][instance];
-}
-
/**
* intel_enable_engine_stats() - Enable engine busy tracking on engine
* @engine: engine to enable stats collection
@@ -1587,15 +1449,19 @@ int intel_enable_engine_stats(struct intel_engine_cs *engine)
}
if (engine->stats.enabled++ == 0) {
- const struct execlist_port *port = execlists->port;
- unsigned int num_ports = execlists_num_ports(execlists);
+ struct i915_request * const *port;
+ struct i915_request *rq;
engine->stats.enabled_at = ktime_get();
/* XXX submission method oblivious? */
- while (num_ports-- && port_isset(port)) {
+ for (port = execlists->active; (rq = *port); port++)
engine->stats.active++;
- port++;
+
+ for (port = execlists->pending; (rq = *port); port++) {
+ /* Exclude any contexts already counted in active */
+ if (!intel_context_inflight_count(rq->hw_context))
+ engine->stats.active++;
}
if (engine->stats.active)
@@ -1708,5 +1574,7 @@ intel_engine_find_active_request(struct intel_engine_cs *engine)
}
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+#include "mock_engine.c"
+#include "selftest_engine.c"
#include "selftest_engine_cs.c"
#endif
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pm.c b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
index ae5b6baf6dff..65b5ca74b394 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
@@ -8,6 +8,8 @@
#include "intel_engine.h"
#include "intel_engine_pm.h"
+#include "intel_engine_pool.h"
+#include "intel_gt.h"
#include "intel_gt_pm.h"
static int __engine_unpark(struct intel_wakeref *wf)
@@ -18,7 +20,7 @@ static int __engine_unpark(struct intel_wakeref *wf)
GEM_TRACE("%s\n", engine->name);
- intel_gt_pm_get(engine->i915);
+ intel_gt_pm_get(engine->gt);
/* Pin the default state for fast resets from atomic context. */
map = NULL;
@@ -35,38 +37,51 @@ static int __engine_unpark(struct intel_wakeref *wf)
return 0;
}
-void intel_engine_pm_get(struct intel_engine_cs *engine)
+#if IS_ENABLED(CONFIG_LOCKDEP)
+
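+/*
+ * While the engine is parking, nothing else can submit along the kernel
+ * context's timeline, so its mutex is never actually taken here; these
+ * helpers merely tell lockdep that we own it, satisfying the
+ * lockdep_assert_held() annotations in intel_context_enter() and co.
+ */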
+static inline unsigned long __timeline_mark_lock(struct intel_context *ce)
{
- intel_wakeref_get(&engine->i915->runtime_pm, &engine->wakeref, __engine_unpark);
+ unsigned long flags;
+
+ local_irq_save(flags);
+ mutex_acquire(&ce->timeline->mutex.dep_map, 2, 0, _THIS_IP_);
+
+ return flags;
}
-void intel_engine_park(struct intel_engine_cs *engine)
+static inline void __timeline_mark_unlock(struct intel_context *ce,
+ unsigned long flags)
{
- /*
- * We are committed now to parking this engine, make sure there
- * will be no more interrupts arriving later and the engine
- * is truly idle.
- */
- if (wait_for(intel_engine_is_idle(engine), 10)) {
- struct drm_printer p = drm_debug_printer(__func__);
+ mutex_release(&ce->timeline->mutex.dep_map, 0, _THIS_IP_);
+ local_irq_restore(flags);
+}
- dev_err(engine->i915->drm.dev,
- "%s is not idle before parking\n",
- engine->name);
- intel_engine_dump(engine, &p, NULL);
- }
+#else
+
+static inline unsigned long __timeline_mark_lock(struct intel_context *ce)
+{
+ return 0;
+}
+
+static inline void __timeline_mark_unlock(struct intel_context *ce,
+ unsigned long flags)
+{
}
+#endif /* !IS_ENABLED(CONFIG_LOCKDEP) */
+
static bool switch_to_kernel_context(struct intel_engine_cs *engine)
{
struct i915_request *rq;
+ unsigned long flags;
+ bool result = true;
/* Already inside the kernel context, safe to power down. */
if (engine->wakeref_serial == engine->serial)
return true;
/* GPU is pointing to the void, as good as in the kernel context. */
- if (i915_reset_failed(engine->i915))
+ if (intel_gt_is_wedged(engine->gt))
return true;
/*
@@ -81,18 +96,31 @@ static bool switch_to_kernel_context(struct intel_engine_cs *engine)
* retiring the last request, thus all rings should be empty and
* all timelines idle.
*/
+ flags = __timeline_mark_lock(engine->kernel_context);
+
rq = __i915_request_create(engine->kernel_context, GFP_NOWAIT);
if (IS_ERR(rq))
/* Context switch failed, hope for the best! Maybe reset? */
- return true;
+ goto out_unlock;
+
+ intel_timeline_enter(rq->timeline);
/* Check again on the next retirement. */
engine->wakeref_serial = engine->serial + 1;
+ i915_request_add_active_barriers(rq);
- i915_request_add_barriers(rq);
+ /* Install ourselves as a preemption barrier */
+ rq->sched.attr.priority = I915_PRIORITY_UNPREEMPTABLE;
__i915_request_commit(rq);
- return false;
+ /* Release our exclusive hold on the engine */
+ __intel_wakeref_defer_park(&engine->wakeref);
+ __i915_request_queue(rq, NULL);
+
+ result = false;
+out_unlock:
+ __timeline_mark_unlock(engine->kernel_context, flags);
+ return result;
}
static int __engine_park(struct intel_wakeref *wf)
@@ -115,6 +143,7 @@ static int __engine_park(struct intel_wakeref *wf)
GEM_TRACE("%s\n", engine->name);
intel_engine_disarm_breadcrumbs(engine);
+ intel_engine_pool_park(&engine->pool);
/* Must be reset upon idling, or we may miss the busy wakeup. */
GEM_BUG_ON(engine->execlists.queue_priority_hint != INT_MIN);
@@ -129,16 +158,22 @@ static int __engine_park(struct intel_wakeref *wf)
engine->execlists.no_priolist = false;
- intel_gt_pm_put(engine->i915);
+ intel_gt_pm_put(engine->gt);
return 0;
}
-void intel_engine_pm_put(struct intel_engine_cs *engine)
-{
- intel_wakeref_put(&engine->i915->runtime_pm, &engine->wakeref, __engine_park);
-}
+static const struct intel_wakeref_ops wf_ops = {
+ .get = __engine_unpark,
+ .put = __engine_park,
+};
void intel_engine_init__pm(struct intel_engine_cs *engine)
{
- intel_wakeref_init(&engine->wakeref);
+ struct intel_runtime_pm *rpm = &engine->i915->runtime_pm;
+
+ intel_wakeref_init(&engine->wakeref, rpm, &wf_ops);
}
+
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+#include "selftest_engine_pm.c"
+#endif
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pm.h b/drivers/gpu/drm/i915/gt/intel_engine_pm.h
index a11c893f64c6..739c50fefcef 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_pm.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine_pm.h
@@ -10,18 +10,26 @@
#include "intel_engine_types.h"
#include "intel_wakeref.h"
-struct drm_i915_private;
+static inline bool
+intel_engine_pm_is_awake(const struct intel_engine_cs *engine)
+{
+ return intel_wakeref_is_active(&engine->wakeref);
+}
-void intel_engine_pm_get(struct intel_engine_cs *engine);
-void intel_engine_pm_put(struct intel_engine_cs *engine);
+static inline void intel_engine_pm_get(struct intel_engine_cs *engine)
+{
+ intel_wakeref_get(&engine->wakeref);
+}
-static inline bool
-intel_engine_pm_get_if_awake(struct intel_engine_cs *engine)
+static inline bool intel_engine_pm_get_if_awake(struct intel_engine_cs *engine)
{
return intel_wakeref_get_if_active(&engine->wakeref);
}
-void intel_engine_park(struct intel_engine_cs *engine);
+static inline void intel_engine_pm_put(struct intel_engine_cs *engine)
+{
+ intel_wakeref_put(&engine->wakeref);
+}
void intel_engine_init__pm(struct intel_engine_cs *engine);
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pool.c b/drivers/gpu/drm/i915/gt/intel_engine_pool.c
new file mode 100644
index 000000000000..4cd54c569911
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_engine_pool.c
@@ -0,0 +1,177 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2014-2018 Intel Corporation
+ */
+
+#include "gem/i915_gem_object.h"
+
+#include "i915_drv.h"
+#include "intel_engine_pm.h"
+#include "intel_engine_pool.h"
+
+static struct intel_engine_cs *to_engine(struct intel_engine_pool *pool)
+{
+ return container_of(pool, struct intel_engine_cs, pool);
+}
+
+static struct list_head *
+bucket_for_size(struct intel_engine_pool *pool, size_t sz)
+{
+ int n;
+
+ /*
+ * Compute a power-of-two bucket (rounding down), but throw everything
+ * of 8 or more pages into the same last bucket: i.e. the buckets hold
+ * objects of (1 page, 2-3 pages, 4-7 pages, 8+ pages).
+ */
+ n = fls(sz >> PAGE_SHIFT) - 1;
+ if (n >= ARRAY_SIZE(pool->cache_list))
+ n = ARRAY_SIZE(pool->cache_list) - 1;
+
+ return &pool->cache_list[n];
+}
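+
+/*
+ * For example, with 4KiB pages: a 4KiB object lands in bucket 0, 8-12KiB
+ * in bucket 1, 16-28KiB in bucket 2, and anything of 32KiB or more in
+ * bucket 3, as fls() rounds down to the previous power of two.
+ */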
+
+static void node_free(struct intel_engine_pool_node *node)
+{
+ i915_gem_object_put(node->obj);
+ i915_active_fini(&node->active);
+ kfree(node);
+}
+
+static int pool_active(struct i915_active *ref)
+{
+ struct intel_engine_pool_node *node =
+ container_of(ref, typeof(*node), active);
+ struct dma_resv *resv = node->obj->base.resv;
+ int err;
+
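+ /*
+ * Drop the stale exclusive fence left behind by the previous user
+ * of this recycled buffer, so that reuse does not inherit a false
+ * dependency.
+ */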
+ if (dma_resv_trylock(resv)) {
+ dma_resv_add_excl_fence(resv, NULL);
+ dma_resv_unlock(resv);
+ }
+
+ err = i915_gem_object_pin_pages(node->obj);
+ if (err)
+ return err;
+
+ /* Hide this pinned object from the shrinker until retired */
+ i915_gem_object_make_unshrinkable(node->obj);
+
+ return 0;
+}
+
+static void pool_retire(struct i915_active *ref)
+{
+ struct intel_engine_pool_node *node =
+ container_of(ref, typeof(*node), active);
+ struct intel_engine_pool *pool = node->pool;
+ struct list_head *list = bucket_for_size(pool, node->obj->base.size);
+ unsigned long flags;
+
+ GEM_BUG_ON(!intel_engine_pm_is_awake(to_engine(pool)));
+
+ i915_gem_object_unpin_pages(node->obj);
+
+ /* Return this object to the shrinker pool */
+ i915_gem_object_make_purgeable(node->obj);
+
+ spin_lock_irqsave(&pool->lock, flags);
+ list_add(&node->link, list);
+ spin_unlock_irqrestore(&pool->lock, flags);
+}
+
+static struct intel_engine_pool_node *
+node_create(struct intel_engine_pool *pool, size_t sz)
+{
+ struct intel_engine_cs *engine = to_engine(pool);
+ struct intel_engine_pool_node *node;
+ struct drm_i915_gem_object *obj;
+
+ node = kmalloc(sizeof(*node),
+ GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
+ if (!node)
+ return ERR_PTR(-ENOMEM);
+
+ node->pool = pool;
+ i915_active_init(engine->i915, &node->active, pool_active, pool_retire);
+
+ obj = i915_gem_object_create_internal(engine->i915, sz);
+ if (IS_ERR(obj)) {
+ i915_active_fini(&node->active);
+ kfree(node);
+ return ERR_CAST(obj);
+ }
+
+ node->obj = obj;
+ return node;
+}
+
+struct intel_engine_pool_node *
+intel_engine_pool_get(struct intel_engine_pool *pool, size_t size)
+{
+ struct intel_engine_pool_node *node;
+ struct list_head *list;
+ unsigned long flags;
+ int ret;
+
+ GEM_BUG_ON(!intel_engine_pm_is_awake(to_engine(pool)));
+
+ size = PAGE_ALIGN(size);
+ list = bucket_for_size(pool, size);
+
+ spin_lock_irqsave(&pool->lock, flags);
+ list_for_each_entry(node, list, link) {
+ if (node->obj->base.size < size)
+ continue;
+ list_del(&node->link);
+ break;
+ }
+ spin_unlock_irqrestore(&pool->lock, flags);
+
+ if (&node->link == list) {
+ node = node_create(pool, size);
+ if (IS_ERR(node))
+ return node;
+ }
+
+ ret = i915_active_acquire(&node->active);
+ if (ret) {
+ node_free(node);
+ return ERR_PTR(ret);
+ }
+
+ return node;
+}
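+
+/*
+ * Typical usage (illustrative sketch only): acquire a node, emit a
+ * request that uses node->obj, and tie the node's lifetime to it:
+ *
+ *	node = intel_engine_pool_get(&engine->pool, size);
+ *	...
+ *	err = intel_engine_pool_mark_active(node, rq);
+ *	...
+ *	intel_engine_pool_put(node);
+ *
+ * The buffer is only returned to the pool once rq has been retired.
+ */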
+
+void intel_engine_pool_init(struct intel_engine_pool *pool)
+{
+ int n;
+
+ spin_lock_init(&pool->lock);
+ for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++)
+ INIT_LIST_HEAD(&pool->cache_list[n]);
+}
+
+void intel_engine_pool_park(struct intel_engine_pool *pool)
+{
+ int n;
+
+ for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++) {
+ struct list_head *list = &pool->cache_list[n];
+ struct intel_engine_pool_node *node, *nn;
+
+ list_for_each_entry_safe(node, nn, list, link)
+ node_free(node);
+
+ INIT_LIST_HEAD(list);
+ }
+}
+
+void intel_engine_pool_fini(struct intel_engine_pool *pool)
+{
+ int n;
+
+ for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++)
+ GEM_BUG_ON(!list_empty(&pool->cache_list[n]));
+}
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pool.h b/drivers/gpu/drm/i915/gt/intel_engine_pool.h
new file mode 100644
index 000000000000..8d069efd9457
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_engine_pool.h
@@ -0,0 +1,34 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2014-2018 Intel Corporation
+ */
+
+#ifndef INTEL_ENGINE_POOL_H
+#define INTEL_ENGINE_POOL_H
+
+#include "intel_engine_pool_types.h"
+#include "i915_active.h"
+#include "i915_request.h"
+
+struct intel_engine_pool_node *
+intel_engine_pool_get(struct intel_engine_pool *pool, size_t size);
+
+static inline int
+intel_engine_pool_mark_active(struct intel_engine_pool_node *node,
+ struct i915_request *rq)
+{
+ return i915_active_ref(&node->active, rq->timeline, rq);
+}
+
+static inline void
+intel_engine_pool_put(struct intel_engine_pool_node *node)
+{
+ i915_active_release(&node->active);
+}
+
+void intel_engine_pool_init(struct intel_engine_pool *pool);
+void intel_engine_pool_park(struct intel_engine_pool *pool);
+void intel_engine_pool_fini(struct intel_engine_pool *pool);
+
+#endif /* INTEL_ENGINE_POOL_H */
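A minimal usage sketch of this API (hypothetical caller, error unwinding elided): the expected pattern is get a node, fill its object, mark it active against the request that consumes it, then drop the local reference:

static int sketch_use_pool(struct intel_engine_cs *engine,
			   struct i915_request *rq, size_t sz)
{
	struct intel_engine_pool_node *node;
	int err;

	node = intel_engine_pool_get(&engine->pool, sz);
	if (IS_ERR(node))
		return PTR_ERR(node);

	/* ... fill node->obj with commands, bind a vma, etc. ... */

	/* Keep the node alive (and unshrinkable) until rq retires. */
	err = intel_engine_pool_mark_active(node, rq);

	intel_engine_pool_put(node); /* rq now holds the active reference */
	return err;
}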
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pool_types.h b/drivers/gpu/drm/i915/gt/intel_engine_pool_types.h
new file mode 100644
index 000000000000..e31ee361b76f
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_engine_pool_types.h
@@ -0,0 +1,29 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2014-2018 Intel Corporation
+ */
+
+#ifndef INTEL_ENGINE_POOL_TYPES_H
+#define INTEL_ENGINE_POOL_TYPES_H
+
+#include <linux/list.h>
+#include <linux/spinlock.h>
+
+#include "i915_active_types.h"
+
+struct drm_i915_gem_object;
+
+struct intel_engine_pool {
+ spinlock_t lock;
+ struct list_head cache_list[4];
+};
+
+struct intel_engine_pool_node {
+ struct i915_active active;
+ struct drm_i915_gem_object *obj;
+ struct list_head link;
+ struct intel_engine_pool *pool;
+};
+
+#endif /* INTEL_ENGINE_POOL_TYPES_H */
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h
index 43e975a26016..a82cea95c2f2 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h
@@ -12,18 +12,40 @@
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/llist.h>
+#include <linux/rbtree.h>
+#include <linux/timer.h>
#include <linux/types.h>
#include "i915_gem.h"
-#include "i915_gem_batch_pool.h"
#include "i915_pmu.h"
#include "i915_priolist_types.h"
#include "i915_selftest.h"
-#include "i915_timeline_types.h"
+#include "intel_engine_pool_types.h"
#include "intel_sseu.h"
+#include "intel_timeline_types.h"
#include "intel_wakeref.h"
#include "intel_workarounds_types.h"
+/* Legacy HW Engine ID */
+
+#define RCS0_HW 0
+#define VCS0_HW 1
+#define BCS0_HW 2
+#define VECS0_HW 3
+#define VCS1_HW 4
+#define VCS2_HW 6
+#define VCS3_HW 7
+#define VECS1_HW 12
+
+/* Gen11+ HW Engine class + instance */
+#define RENDER_CLASS 0
+#define VIDEO_DECODE_CLASS 1
+#define VIDEO_ENHANCEMENT_CLASS 2
+#define COPY_ENGINE_CLASS 3
+#define OTHER_CLASS 4
+#define MAX_ENGINE_CLASS 4
+#define MAX_ENGINE_INSTANCE 3
+
#define I915_MAX_SLICES 3
#define I915_MAX_SUBSLICES 8
@@ -35,6 +57,7 @@ struct drm_i915_reg_table;
struct i915_gem_context;
struct i915_request;
struct i915_sched_attr;
+struct intel_gt;
struct intel_uncore;
typedef u8 intel_engine_mask_t;
@@ -66,10 +89,6 @@ struct intel_ring {
struct i915_vma *vma;
void *vaddr;
- struct i915_timeline *timeline;
- struct list_head request_list;
- struct list_head active_link;
-
/*
* As we have two types of rings, one global to the engine used
* by ringbuffer submission and those that are exclusive to a
@@ -150,6 +169,11 @@ struct intel_engine_execlists {
struct tasklet_struct tasklet;
/**
+ * @timer: kick the current context if its timeslice expires
+ */
+ struct timer_list timer;
+
+ /**
* @default_priolist: priority list for I915_PRIORITY_NORMAL
*/
struct i915_priolist default_priolist;
@@ -172,51 +196,28 @@ struct intel_engine_execlists {
*/
u32 __iomem *ctrl_reg;
+#define EXECLIST_MAX_PORTS 2
+ /**
+ * @active: the currently known context executing on HW
+ */
+ struct i915_request * const *active;
/**
- * @port: execlist port states
+ * @inflight: the set of contexts submitted and acknowledged by HW

*
- * For each hardware ELSP (ExecList Submission Port) we keep
- * track of the last request and the number of times we submitted
- * that port to hw. We then count the number of times the hw reports
- * a context completion or preemption. As only one context can
- * be active on hw, we limit resubmission of context to port[0]. This
- * is called Lite Restore, of the context.
+ * The set of inflight contexts is managed by reading CS events
+ * from the HW. On a context-switch event (not preemption), we
+ * know the HW has transitioned from port0 to port1, and we
+ * advance our inflight/active tracking accordingly.
*/
- struct execlist_port {
- /**
- * @request_count: combined request and submission count
- */
- struct i915_request *request_count;
-#define EXECLIST_COUNT_BITS 2
-#define port_request(p) ptr_mask_bits((p)->request_count, EXECLIST_COUNT_BITS)
-#define port_count(p) ptr_unmask_bits((p)->request_count, EXECLIST_COUNT_BITS)
-#define port_pack(rq, count) ptr_pack_bits(rq, count, EXECLIST_COUNT_BITS)
-#define port_unpack(p, count) ptr_unpack_bits((p)->request_count, count, EXECLIST_COUNT_BITS)
-#define port_set(p, packed) ((p)->request_count = (packed))
-#define port_isset(p) ((p)->request_count)
-#define port_index(p, execlists) ((p) - (execlists)->port)
-
- /**
- * @context_id: context ID for port
- */
- GEM_DEBUG_DECL(u32 context_id);
-
-#define EXECLIST_MAX_PORTS 2
- } port[EXECLIST_MAX_PORTS];
-
+ struct i915_request *inflight[EXECLIST_MAX_PORTS + 1 /* sentinel */];
/**
- * @active: is the HW active? We consider the HW as active after
- * submitting any context for execution and until we have seen the
- * last context completion event. After that, we do not expect any
- * more events until we submit, and so can park the HW.
+ * @pending: the next set of contexts submitted to ELSP
*
- * As we have a small number of different sources from which we feed
- * the HW, we track the state of each inside a single bitfield.
+ * We store the array of contexts that we submit to HW (via ELSP) and
+ * promote them to the inflight array once HW has signaled the
+ * preemption or idle-to-active event.
*/
- unsigned int active;
-#define EXECLISTS_ACTIVE_USER 0
-#define EXECLISTS_ACTIVE_PREEMPT 1
-#define EXECLISTS_ACTIVE_HWACK 2
+ struct i915_request *pending[EXECLIST_MAX_PORTS + 1];
/**
* @port_mask: number of execlist ports - 1
@@ -224,6 +225,16 @@ struct intel_engine_execlists {
unsigned int port_mask;
/**
+ * @switch_priority_hint: Second context priority.
+ *
+ * We submit multiple contexts to the HW simultaneously and would
+ * like to occasionally switch between them to emulate timeslicing.
+ * To know when timeslicing is suitable, we track the priority of
+ * the context submitted second.
+ */
+ int switch_priority_hint;
+
+ /**
* @queue_priority_hint: Highest pending priority.
*
* When we add requests into the queue, or adjust the priority of
@@ -258,11 +269,6 @@ struct intel_engine_execlists {
u32 *csb_status;
/**
- * @preempt_complete_status: expected CSB upon completing preemption
- */
- u32 preempt_complete_status;
-
- /**
* @csb_size: context status buffer FIFO size
*/
u8 csb_size;
@@ -279,26 +285,32 @@ struct intel_engine_execlists {
struct intel_engine_cs {
struct drm_i915_private *i915;
+ struct intel_gt *gt;
struct intel_uncore *uncore;
char name[INTEL_ENGINE_CS_MAX_NAME];
enum intel_engine_id id;
+ enum intel_engine_id legacy_idx;
+
unsigned int hw_id;
unsigned int guc_id;
- intel_engine_mask_t mask;
- u8 uabi_class;
+ intel_engine_mask_t mask;
u8 class;
u8 instance;
+
+ u8 uabi_class;
+ u8 uabi_instance;
+
u32 context_size;
u32 mmio_base;
u32 uabi_capabilities;
- struct intel_sseu sseu;
+ struct rb_node uabi_node;
- struct intel_ring *buffer;
+ struct intel_sseu sseu;
struct {
spinlock_t lock;
@@ -308,7 +320,6 @@ struct intel_engine_cs {
struct llist_head barrier_tasks;
struct intel_context *kernel_context; /* pinned */
- struct intel_context *preempt_context; /* pinned; optional */
intel_engine_mask_t saturated; /* submitting semaphores too late? */
@@ -319,6 +330,11 @@ struct intel_engine_cs {
struct drm_i915_gem_object *default_state;
void *pinned_default_state;
+ struct {
+ struct intel_ring *ring;
+ struct intel_timeline *timeline;
+ } legacy;
+
/* Rather than have every client wait upon all user interrupts,
* with the herd waking after every interrupt and each doing the
* heavyweight seqno dance, we delegate the task (of being the
@@ -375,7 +391,7 @@ struct intel_engine_cs {
* when the command parser is enabled. Prevents the client from
* modifying the batch contents after software parsing.
*/
- struct i915_gem_batch_pool batch_pool;
+ struct intel_engine_pool pool;
struct intel_hw_status_page status_page;
struct i915_ctx_workarounds wa_ctx;
@@ -404,7 +420,6 @@ struct intel_engine_cs {
const struct intel_context_ops *cops;
int (*request_alloc)(struct i915_request *rq);
- int (*init_context)(struct i915_request *rq);
int (*emit_flush)(struct i915_request *request, u32 mode);
#define EMIT_INVALIDATE BIT(0)
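To make the pending/inflight hand-off described above concrete, here is a hedged sketch of the promotion step driven from the CS-event tasklet; the real process_csb() in intel_lrc.c is considerably more involved, so treat this as an illustration only:

static void sketch_promote_pending(struct intel_engine_execlists *el)
{
	/*
	 * On the idle->active (or preemption) CSB event the HW has
	 * accepted the last ELSP submission, so the pending[] set
	 * becomes the new inflight[] set. Both arrays are
	 * NULL-terminated, hence the "+ 1 sentinel" in their
	 * declarations.
	 */
	memcpy(el->inflight, el->pending, sizeof(el->pending));
	el->active = el->inflight;
	WRITE_ONCE(el->pending[0], NULL);
}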
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_user.c b/drivers/gpu/drm/i915/gt/intel_engine_user.c
new file mode 100644
index 000000000000..77cd5de83930
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_engine_user.c
@@ -0,0 +1,303 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include <linux/list.h>
+#include <linux/list_sort.h>
+#include <linux/llist.h>
+
+#include "i915_drv.h"
+#include "intel_engine.h"
+#include "intel_engine_user.h"
+
+struct intel_engine_cs *
+intel_engine_lookup_user(struct drm_i915_private *i915, u8 class, u8 instance)
+{
+ struct rb_node *p = i915->uabi_engines.rb_node;
+
+ while (p) {
+ struct intel_engine_cs *it =
+ rb_entry(p, typeof(*it), uabi_node);
+
+ if (class < it->uabi_class)
+ p = p->rb_left;
+ else if (class > it->uabi_class ||
+ instance > it->uabi_instance)
+ p = p->rb_right;
+ else if (instance < it->uabi_instance)
+ p = p->rb_left;
+ else
+ return it;
+ }
+
+ return NULL;
+}
+
+void intel_engine_add_user(struct intel_engine_cs *engine)
+{
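+	/*
+	 * engine->uabi_node only becomes an rb_node once the final tree
+	 * is built in intel_engines_driver_register(); until then the
+	 * same storage is borrowed as an llist_node (and later as a
+	 * list_head for sorting), hence the casts here and in
+	 * sort_engines().
+	 */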
+ llist_add((struct llist_node *)&engine->uabi_node,
+ (struct llist_head *)&engine->i915->uabi_engines);
+}
+
+static const u8 uabi_classes[] = {
+ [RENDER_CLASS] = I915_ENGINE_CLASS_RENDER,
+ [COPY_ENGINE_CLASS] = I915_ENGINE_CLASS_COPY,
+ [VIDEO_DECODE_CLASS] = I915_ENGINE_CLASS_VIDEO,
+ [VIDEO_ENHANCEMENT_CLASS] = I915_ENGINE_CLASS_VIDEO_ENHANCE,
+};
+
+static int engine_cmp(void *priv, struct list_head *A, struct list_head *B)
+{
+ const struct intel_engine_cs *a =
+ container_of((struct rb_node *)A, typeof(*a), uabi_node);
+ const struct intel_engine_cs *b =
+ container_of((struct rb_node *)B, typeof(*b), uabi_node);
+
+ if (uabi_classes[a->class] < uabi_classes[b->class])
+ return -1;
+ if (uabi_classes[a->class] > uabi_classes[b->class])
+ return 1;
+
+ if (a->instance < b->instance)
+ return -1;
+ if (a->instance > b->instance)
+ return 1;
+
+ return 0;
+}
+
+static struct llist_node *get_engines(struct drm_i915_private *i915)
+{
+ return llist_del_all((struct llist_head *)&i915->uabi_engines);
+}
+
+static void sort_engines(struct drm_i915_private *i915,
+ struct list_head *engines)
+{
+ struct llist_node *pos, *next;
+
+ llist_for_each_safe(pos, next, get_engines(i915)) {
+ struct intel_engine_cs *engine =
+ container_of((struct rb_node *)pos, typeof(*engine),
+ uabi_node);
+ list_add((struct list_head *)&engine->uabi_node, engines);
+ }
+ list_sort(NULL, engines, engine_cmp);
+}
+
+static void set_scheduler_caps(struct drm_i915_private *i915)
+{
+ static const struct {
+ u8 engine;
+ u8 sched;
+ } map[] = {
+#define MAP(x, y) { ilog2(I915_ENGINE_##x), ilog2(I915_SCHEDULER_CAP_##y) }
+ MAP(HAS_PREEMPTION, PREEMPTION),
+ MAP(HAS_SEMAPHORES, SEMAPHORES),
+ MAP(SUPPORTS_STATS, ENGINE_BUSY_STATS),
+#undef MAP
+ };
+ struct intel_engine_cs *engine;
+ u32 enabled, disabled;
+
+ enabled = 0;
+ disabled = 0;
+ for_each_uabi_engine(engine, i915) { /* all engines must agree! */
+ int i;
+
+ if (engine->schedule)
+ enabled |= (I915_SCHEDULER_CAP_ENABLED |
+ I915_SCHEDULER_CAP_PRIORITY);
+ else
+ disabled |= (I915_SCHEDULER_CAP_ENABLED |
+ I915_SCHEDULER_CAP_PRIORITY);
+
+ for (i = 0; i < ARRAY_SIZE(map); i++) {
+ if (engine->flags & BIT(map[i].engine))
+ enabled |= BIT(map[i].sched);
+ else
+ disabled |= BIT(map[i].sched);
+ }
+ }
+
+ i915->caps.scheduler = enabled & ~disabled;
+ if (!(i915->caps.scheduler & I915_SCHEDULER_CAP_ENABLED))
+ i915->caps.scheduler = 0;
+}
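A worked illustration of the consensus rule above, with hypothetical engine capabilities:

/*
 * Hypothetical two-engine example:
 *   rcs0: has semaphores -> enabled  |= SEMAPHORES
 *   vcs0: no semaphores  -> disabled |= SEMAPHORES
 *
 * enabled & ~disabled therefore drops SEMAPHORES: a scheduler
 * capability is only advertised to userspace when every uabi
 * engine agrees.
 */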
+
+const char *intel_engine_class_repr(u8 class)
+{
+ static const char * const uabi_names[] = {
+ [RENDER_CLASS] = "rcs",
+ [COPY_ENGINE_CLASS] = "bcs",
+ [VIDEO_DECODE_CLASS] = "vcs",
+ [VIDEO_ENHANCEMENT_CLASS] = "vecs",
+ };
+
+ if (class >= ARRAY_SIZE(uabi_names) || !uabi_names[class])
+ return "xxx";
+
+ return uabi_names[class];
+}
+
+struct legacy_ring {
+ struct intel_gt *gt;
+ u8 class;
+ u8 instance;
+};
+
+static int legacy_ring_idx(const struct legacy_ring *ring)
+{
+ static const struct {
+ u8 base, max;
+ } map[] = {
+ [RENDER_CLASS] = { RCS0, 1 },
+ [COPY_ENGINE_CLASS] = { BCS0, 1 },
+ [VIDEO_DECODE_CLASS] = { VCS0, I915_MAX_VCS },
+ [VIDEO_ENHANCEMENT_CLASS] = { VECS0, I915_MAX_VECS },
+ };
+
+ if (GEM_DEBUG_WARN_ON(ring->class >= ARRAY_SIZE(map)))
+ return -1;
+
+ if (GEM_DEBUG_WARN_ON(ring->instance >= map[ring->class].max))
+ return -1;
+
+ return map[ring->class].base + ring->instance;
+}
+
+static void add_legacy_ring(struct legacy_ring *ring,
+ struct intel_engine_cs *engine)
+{
+ int idx;
+
+ if (engine->gt != ring->gt || engine->class != ring->class) {
+ ring->gt = engine->gt;
+ ring->class = engine->class;
+ ring->instance = 0;
+ }
+
+ idx = legacy_ring_idx(ring);
+ if (unlikely(idx == -1))
+ return;
+
+ GEM_BUG_ON(idx >= ARRAY_SIZE(ring->gt->engine));
+ ring->gt->engine[idx] = engine;
+ ring->instance++;
+
+ engine->legacy_idx = idx;
+}
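A hedged walk-through of the instance bookkeeping, assuming a part with two video engines:

/*
 * Engines arrive sorted by class, so for vcs0 and vcs1 on one gt:
 *
 *   add_legacy_ring(&ring, vcs0): class changes -> instance = 0,
 *                                 idx = VCS0 + 0, instance -> 1
 *   add_legacy_ring(&ring, vcs1): same class    -> idx = VCS0 + 1
 *
 * recreating the fixed ring order expected by legacy execbuf.
 */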
+
+void intel_engines_driver_register(struct drm_i915_private *i915)
+{
+ struct legacy_ring ring = {};
+ u8 uabi_instances[4] = {};
+ struct list_head *it, *next;
+ struct rb_node **p, *prev;
+ LIST_HEAD(engines);
+
+ sort_engines(i915, &engines);
+
+ prev = NULL;
+ p = &i915->uabi_engines.rb_node;
+ list_for_each_safe(it, next, &engines) {
+ struct intel_engine_cs *engine =
+ container_of((struct rb_node *)it, typeof(*engine),
+ uabi_node);
+ char old[sizeof(engine->name)];
+
+ GEM_BUG_ON(engine->class >= ARRAY_SIZE(uabi_classes));
+ engine->uabi_class = uabi_classes[engine->class];
+
+ GEM_BUG_ON(engine->uabi_class >= ARRAY_SIZE(uabi_instances));
+ engine->uabi_instance = uabi_instances[engine->uabi_class]++;
+
+ /* Replace the internal name with the final user facing name */
+ memcpy(old, engine->name, sizeof(engine->name));
+ scnprintf(engine->name, sizeof(engine->name), "%s%u",
+ intel_engine_class_repr(engine->class),
+ engine->uabi_instance);
+ DRM_DEBUG_DRIVER("renamed %s to %s\n", old, engine->name);
+
+ rb_link_node(&engine->uabi_node, prev, p);
+ rb_insert_color(&engine->uabi_node, &i915->uabi_engines);
+
+ GEM_BUG_ON(intel_engine_lookup_user(i915,
+ engine->uabi_class,
+ engine->uabi_instance) != engine);
+
+ /* Fix up the mapping to match default execbuf::user_map[] */
+ add_legacy_ring(&ring, engine);
+
+ prev = &engine->uabi_node;
+ p = &prev->rb_right;
+ }
+
+ if (IS_ENABLED(CONFIG_DRM_I915_SELFTESTS) &&
+ IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) {
+ struct intel_engine_cs *engine;
+ unsigned int isolation;
+ int class, inst;
+ int errors = 0;
+
+ for (class = 0; class < ARRAY_SIZE(uabi_instances); class++) {
+ for (inst = 0; inst < uabi_instances[class]; inst++) {
+ engine = intel_engine_lookup_user(i915,
+ class, inst);
+ if (!engine) {
+ pr_err("UABI engine not found for { class:%d, instance:%d }\n",
+ class, inst);
+ errors++;
+ continue;
+ }
+
+ if (engine->uabi_class != class ||
+ engine->uabi_instance != inst) {
+ pr_err("Wrong UABI engine:%s { class:%d, instance:%d } found for { class:%d, instance:%d }\n",
+ engine->name,
+ engine->uabi_class,
+ engine->uabi_instance,
+ class, inst);
+ errors++;
+ continue;
+ }
+ }
+ }
+
+ /*
+ * Make sure that classes with multiple engine instances all
+ * share the same basic configuration.
+ */
+ isolation = intel_engines_has_context_isolation(i915);
+ for_each_uabi_engine(engine, i915) {
+ unsigned int bit = BIT(engine->uabi_class);
+ unsigned int expected = engine->default_state ? bit : 0;
+
+ if ((isolation & bit) != expected) {
+ pr_err("mismatching default context state for class %d on engine %s\n",
+ engine->uabi_class, engine->name);
+ errors++;
+ }
+ }
+
+ if (WARN(errors, "Invalid UABI engine mapping found"))
+ i915->uabi_engines = RB_ROOT;
+ }
+
+ set_scheduler_caps(i915);
+}
+
+unsigned int intel_engines_has_context_isolation(struct drm_i915_private *i915)
+{
+ struct intel_engine_cs *engine;
+ unsigned int which;
+
+ which = 0;
+ for_each_uabi_engine(engine, i915)
+ if (engine->default_state)
+ which |= BIT(engine->uabi_class);
+
+ return which;
+}
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_user.h b/drivers/gpu/drm/i915/gt/intel_engine_user.h
new file mode 100644
index 000000000000..f845ea1cbfaa
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_engine_user.h
@@ -0,0 +1,25 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef INTEL_ENGINE_USER_H
+#define INTEL_ENGINE_USER_H
+
+#include <linux/types.h>
+
+struct drm_i915_private;
+struct intel_engine_cs;
+
+struct intel_engine_cs *
+intel_engine_lookup_user(struct drm_i915_private *i915, u8 class, u8 instance);
+
+unsigned int intel_engines_has_context_isolation(struct drm_i915_private *i915);
+
+void intel_engine_add_user(struct intel_engine_cs *engine);
+void intel_engines_driver_register(struct drm_i915_private *i915);
+
+const char *intel_engine_class_repr(u8 class);
+
+#endif /* INTEL_ENGINE_USER_H */
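A hypothetical caller sketch: this is how an execbuf-style { class, instance } pair from userspace would be resolved (the values are illustrative):

	struct intel_engine_cs *engine;

	engine = intel_engine_lookup_user(i915, I915_ENGINE_CLASS_VIDEO, 1);
	if (!engine)
		return -EINVAL; /* vcs1 is not present on this part */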
diff --git a/drivers/gpu/drm/i915/gt/intel_gpu_commands.h b/drivers/gpu/drm/i915/gt/intel_gpu_commands.h
index eec31e36aca7..86e00a2db8a4 100644
--- a/drivers/gpu/drm/i915/gt/intel_gpu_commands.h
+++ b/drivers/gpu/drm/i915/gt/intel_gpu_commands.h
@@ -8,6 +8,13 @@
#define _INTEL_GPU_COMMANDS_H_
/*
+ * Target address alignments required for GPU access, e.g.
+ * MI_STORE_DWORD_IMM.
+ */
+#define alignof_dword 4
+#define alignof_qword 8
+
+/*
* Instruction field definitions used by the command parser
*/
#define INSTR_CLIENT_SHIFT 29
@@ -179,11 +186,12 @@
#define GFX_OP_DRAWRECT_INFO ((0x3<<29)|(0x1d<<24)|(0x80<<16)|(0x3))
#define GFX_OP_DRAWRECT_INFO_I965 ((0x7900<<16)|0x2)
-#define COLOR_BLT_CMD (2<<29 | 0x40<<22 | (5-2))
+#define COLOR_BLT_CMD (2 << 29 | 0x40 << 22 | (5 - 2))
#define XY_COLOR_BLT_CMD (2 << 29 | 0x50 << 22)
-#define SRC_COPY_BLT_CMD ((2<<29)|(0x43<<22)|4)
-#define XY_SRC_COPY_BLT_CMD ((2<<29)|(0x53<<22)|6)
-#define XY_MONO_SRC_COPY_IMM_BLT ((2<<29)|(0x71<<22)|5)
+#define SRC_COPY_BLT_CMD (2 << 29 | 0x43 << 22)
+#define GEN9_XY_FAST_COPY_BLT_CMD (2 << 29 | 0x42 << 22)
+#define XY_SRC_COPY_BLT_CMD (2 << 29 | 0x53 << 22)
+#define XY_MONO_SRC_COPY_IMM_BLT (2 << 29 | 0x71 << 22 | 5)
#define BLT_WRITE_A (2<<20)
#define BLT_WRITE_RGB (1<<20)
#define BLT_WRITE_RGBA (BLT_WRITE_RGB | BLT_WRITE_A)
@@ -200,6 +208,8 @@
#define DISPLAY_PLANE_A (0<<20)
#define DISPLAY_PLANE_B (1<<20)
#define GFX_OP_PIPE_CONTROL(len) ((0x3<<29)|(0x3<<27)|(0x2<<24)|((len)-2))
+#define PIPE_CONTROL_COMMAND_CACHE_INVALIDATE (1<<29) /* gen11+ */
+#define PIPE_CONTROL_TILE_CACHE_FLUSH (1<<28) /* gen11+ */
#define PIPE_CONTROL_FLUSH_L3 (1<<27)
#define PIPE_CONTROL_GLOBAL_GTT_IVB (1<<24) /* gen7+ */
#define PIPE_CONTROL_MMIO_WRITE (1<<23)
diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c b/drivers/gpu/drm/i915/gt/intel_gt.c
new file mode 100644
index 000000000000..d48ec9a76ed1
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_gt.c
@@ -0,0 +1,268 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include "i915_drv.h"
+#include "intel_gt.h"
+#include "intel_gt_pm.h"
+#include "intel_uncore.h"
+
+void intel_gt_init_early(struct intel_gt *gt, struct drm_i915_private *i915)
+{
+ gt->i915 = i915;
+ gt->uncore = &i915->uncore;
+
+ spin_lock_init(&gt->irq_lock);
+
+ INIT_LIST_HEAD(&gt->closed_vma);
+ spin_lock_init(&gt->closed_lock);
+
+ intel_gt_init_hangcheck(gt);
+ intel_gt_init_reset(gt);
+ intel_gt_pm_init_early(gt);
+ intel_uc_init_early(&gt->uc);
+}
+
+void intel_gt_init_hw(struct drm_i915_private *i915)
+{
+ i915->gt.ggtt = &i915->ggtt;
+}
+
+static void rmw_set(struct intel_uncore *uncore, i915_reg_t reg, u32 set)
+{
+ intel_uncore_rmw(uncore, reg, 0, set);
+}
+
+static void rmw_clear(struct intel_uncore *uncore, i915_reg_t reg, u32 clr)
+{
+ intel_uncore_rmw(uncore, reg, clr, 0);
+}
+
+static void clear_register(struct intel_uncore *uncore, i915_reg_t reg)
+{
+ intel_uncore_rmw(uncore, reg, 0, 0);
+}
+
+static void gen8_clear_engine_error_register(struct intel_engine_cs *engine)
+{
+ GEN6_RING_FAULT_REG_RMW(engine, RING_FAULT_VALID, 0);
+ GEN6_RING_FAULT_REG_POSTING_READ(engine);
+}
+
+void
+intel_gt_clear_error_registers(struct intel_gt *gt,
+ intel_engine_mask_t engine_mask)
+{
+ struct drm_i915_private *i915 = gt->i915;
+ struct intel_uncore *uncore = gt->uncore;
+ u32 eir;
+
+ if (!IS_GEN(i915, 2))
+ clear_register(uncore, PGTBL_ER);
+
+ if (INTEL_GEN(i915) < 4)
+ clear_register(uncore, IPEIR(RENDER_RING_BASE));
+ else
+ clear_register(uncore, IPEIR_I965);
+
+ clear_register(uncore, EIR);
+ eir = intel_uncore_read(uncore, EIR);
+ if (eir) {
+ /*
+ * Some errors might have become stuck; mask them.
+ */
+ DRM_DEBUG_DRIVER("EIR stuck: 0x%08x, masking\n", eir);
+ rmw_set(uncore, EMR, eir);
+ intel_uncore_write(uncore, GEN2_IIR,
+ I915_MASTER_ERROR_INTERRUPT);
+ }
+
+ if (INTEL_GEN(i915) >= 12) {
+ rmw_clear(uncore, GEN12_RING_FAULT_REG, RING_FAULT_VALID);
+ intel_uncore_posting_read(uncore, GEN12_RING_FAULT_REG);
+ } else if (INTEL_GEN(i915) >= 8) {
+ rmw_clear(uncore, GEN8_RING_FAULT_REG, RING_FAULT_VALID);
+ intel_uncore_posting_read(uncore, GEN8_RING_FAULT_REG);
+ } else if (INTEL_GEN(i915) >= 6) {
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ for_each_engine_masked(engine, i915, engine_mask, id)
+ gen8_clear_engine_error_register(engine);
+ }
+}
+
+static void gen6_check_faults(struct intel_gt *gt)
+{
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ u32 fault;
+
+ for_each_engine(engine, gt->i915, id) {
+ fault = GEN6_RING_FAULT_REG_READ(engine);
+ if (fault & RING_FAULT_VALID) {
+ DRM_DEBUG_DRIVER("Unexpected fault\n"
+ "\tAddr: 0x%08lx\n"
+ "\tAddress space: %s\n"
+ "\tSource ID: %d\n"
+ "\tType: %d\n",
+ fault & PAGE_MASK,
+ fault & RING_FAULT_GTTSEL_MASK ?
+ "GGTT" : "PPGTT",
+ RING_FAULT_SRCID(fault),
+ RING_FAULT_FAULT_TYPE(fault));
+ }
+ }
+}
+
+static void gen8_check_faults(struct intel_gt *gt)
+{
+ struct intel_uncore *uncore = gt->uncore;
+ i915_reg_t fault_reg, fault_data0_reg, fault_data1_reg;
+ u32 fault;
+
+ if (INTEL_GEN(gt->i915) >= 12) {
+ fault_reg = GEN12_RING_FAULT_REG;
+ fault_data0_reg = GEN12_FAULT_TLB_DATA0;
+ fault_data1_reg = GEN12_FAULT_TLB_DATA1;
+ } else {
+ fault_reg = GEN8_RING_FAULT_REG;
+ fault_data0_reg = GEN8_FAULT_TLB_DATA0;
+ fault_data1_reg = GEN8_FAULT_TLB_DATA1;
+ }
+
+ fault = intel_uncore_read(uncore, fault_reg);
+ if (fault & RING_FAULT_VALID) {
+ u32 fault_data0, fault_data1;
+ u64 fault_addr;
+
+ fault_data0 = intel_uncore_read(uncore, fault_data0_reg);
+ fault_data1 = intel_uncore_read(uncore, fault_data1_reg);
+
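+		/*
+		 * DATA0 carries VA bits 43:12 (page granular) and the
+		 * low nibble of DATA1 carries bits 47:44, hence the 12
+		 * and 44 bit shifts when stitching the address back
+		 * together below.
+		 */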
+ fault_addr = ((u64)(fault_data1 & FAULT_VA_HIGH_BITS) << 44) |
+ ((u64)fault_data0 << 12);
+
+ DRM_DEBUG_DRIVER("Unexpected fault\n"
+ "\tAddr: 0x%08x_%08x\n"
+ "\tAddress space: %s\n"
+ "\tEngine ID: %d\n"
+ "\tSource ID: %d\n"
+ "\tType: %d\n",
+ upper_32_bits(fault_addr),
+ lower_32_bits(fault_addr),
+ fault_data1 & FAULT_GTT_SEL ? "GGTT" : "PPGTT",
+ GEN8_RING_FAULT_ENGINE_ID(fault),
+ RING_FAULT_SRCID(fault),
+ RING_FAULT_FAULT_TYPE(fault));
+ }
+}
+
+void intel_gt_check_and_clear_faults(struct intel_gt *gt)
+{
+ struct drm_i915_private *i915 = gt->i915;
+
+ /* From GEN8 onwards we only have one 'All Engine Fault Register' */
+ if (INTEL_GEN(i915) >= 8)
+ gen8_check_faults(gt);
+ else if (INTEL_GEN(i915) >= 6)
+ gen6_check_faults(gt);
+ else
+ return;
+
+ intel_gt_clear_error_registers(gt, ALL_ENGINES);
+}
+
+void intel_gt_flush_ggtt_writes(struct intel_gt *gt)
+{
+ struct drm_i915_private *i915 = gt->i915;
+ intel_wakeref_t wakeref;
+
+ /*
+ * No actual flushing is required for the GTT write domain for reads
+ * from the GTT domain. Writes to it "immediately" go to main memory
+ * as far as we know, so there's no chipset flush. It also doesn't
+ * land in the GPU render cache.
+ *
+ * However, we do have to enforce the order so that all writes through
+ * the GTT land before any writes to the device, such as updates to
+ * the GATT itself.
+ *
+ * We also have to wait a bit for the writes to land from the GTT.
+ * An uncached read (i.e. mmio) seems to be ideal for the round-trip
+ * timing. This issue has only been observed when switching quickly
+ * between GTT writes and CPU reads from inside the kernel on recent hw,
+ * and it appears to only affect discrete GTT blocks (i.e. we could
+ * not reproduce this behaviour on LLC system agents until Cannonlake
+ * arrived).
+ */
+
+ wmb();
+
+ if (INTEL_INFO(i915)->has_coherent_ggtt)
+ return;
+
+ intel_gt_chipset_flush(gt);
+
+ with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
+ struct intel_uncore *uncore = gt->uncore;
+
+ spin_lock_irq(&uncore->lock);
+ intel_uncore_posting_read_fw(uncore,
+ RING_HEAD(RENDER_RING_BASE));
+ spin_unlock_irq(&uncore->lock);
+ }
+}
+
+void intel_gt_chipset_flush(struct intel_gt *gt)
+{
+ wmb();
+ if (INTEL_GEN(gt->i915) < 6)
+ intel_gtt_chipset_flush();
+}
+
+int intel_gt_init_scratch(struct intel_gt *gt, unsigned int size)
+{
+ struct drm_i915_private *i915 = gt->i915;
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+ int ret;
+
+ obj = i915_gem_object_create_stolen(i915, size);
+ if (!obj)
+ obj = i915_gem_object_create_internal(i915, size);
+ if (IS_ERR(obj)) {
+ DRM_ERROR("Failed to allocate scratch page\n");
+ return PTR_ERR(obj);
+ }
+
+ vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
+ if (IS_ERR(vma)) {
+ ret = PTR_ERR(vma);
+ goto err_unref;
+ }
+
+ ret = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
+ if (ret)
+ goto err_unref;
+
+ gt->scratch = i915_vma_make_unshrinkable(vma);
+
+ return 0;
+
+err_unref:
+ i915_gem_object_put(obj);
+ return ret;
+}
+
+void intel_gt_fini_scratch(struct intel_gt *gt)
+{
+ i915_vma_unpin_and_release(&gt->scratch, 0);
+}
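A hypothetical init-path sketch pairing the two calls; the size and error handling here are illustrative, not quoted from the actual caller:

	err = intel_gt_init_scratch(gt, PAGE_SIZE);
	if (err)
		return err;

	/* ... driver load continues; CS writes may target gt->scratch ... */

	intel_gt_fini_scratch(gt); /* on unload: unpin and release the vma */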
+
+void intel_gt_driver_late_release(struct intel_gt *gt)
+{
+ intel_uc_driver_late_release(&gt->uc);
+ intel_gt_fini_reset(gt);
+}
diff --git a/drivers/gpu/drm/i915/gt/intel_gt.h b/drivers/gpu/drm/i915/gt/intel_gt.h
new file mode 100644
index 000000000000..4920cb351f10
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_gt.h
@@ -0,0 +1,60 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_GT__
+#define __INTEL_GT__
+
+#include "intel_engine_types.h"
+#include "intel_gt_types.h"
+#include "intel_reset.h"
+
+struct drm_i915_private;
+
+static inline struct intel_gt *uc_to_gt(struct intel_uc *uc)
+{
+ return container_of(uc, struct intel_gt, uc);
+}
+
+static inline struct intel_gt *guc_to_gt(struct intel_guc *guc)
+{
+ return container_of(guc, struct intel_gt, uc.guc);
+}
+
+static inline struct intel_gt *huc_to_gt(struct intel_huc *huc)
+{
+ return container_of(huc, struct intel_gt, uc.huc);
+}
+
+void intel_gt_init_early(struct intel_gt *gt, struct drm_i915_private *i915);
+void intel_gt_init_hw(struct drm_i915_private *i915);
+
+void intel_gt_driver_late_release(struct intel_gt *gt);
+
+void intel_gt_check_and_clear_faults(struct intel_gt *gt);
+void intel_gt_clear_error_registers(struct intel_gt *gt,
+ intel_engine_mask_t engine_mask);
+
+void intel_gt_flush_ggtt_writes(struct intel_gt *gt);
+void intel_gt_chipset_flush(struct intel_gt *gt);
+
+void intel_gt_init_hangcheck(struct intel_gt *gt);
+
+int intel_gt_init_scratch(struct intel_gt *gt, unsigned int size);
+void intel_gt_fini_scratch(struct intel_gt *gt);
+
+static inline u32 intel_gt_scratch_offset(const struct intel_gt *gt,
+ enum intel_gt_scratch_field field)
+{
+ return i915_ggtt_offset(gt->scratch) + field;
+}
+
+static inline bool intel_gt_is_wedged(struct intel_gt *gt)
+{
+ return __intel_reset_failed(&gt->reset);
+}
+
+void intel_gt_queue_hangcheck(struct intel_gt *gt);
+
+#endif /* __INTEL_GT__ */
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_irq.c b/drivers/gpu/drm/i915/gt/intel_gt_irq.c
new file mode 100644
index 000000000000..34a4fb624bf7
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_gt_irq.c
@@ -0,0 +1,455 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include <linux/sched/clock.h>
+
+#include "i915_drv.h"
+#include "i915_irq.h"
+#include "intel_gt.h"
+#include "intel_gt_irq.h"
+#include "intel_uncore.h"
+
+static void guc_irq_handler(struct intel_guc *guc, u16 iir)
+{
+ if (iir & GUC_INTR_GUC2HOST)
+ intel_guc_to_host_event_handler(guc);
+}
+
+static void
+cs_irq_handler(struct intel_engine_cs *engine, u32 iir)
+{
+ bool tasklet = false;
+
+ if (iir & GT_CONTEXT_SWITCH_INTERRUPT)
+ tasklet = true;
+
+ if (iir & GT_RENDER_USER_INTERRUPT) {
+ intel_engine_breadcrumbs_irq(engine);
+ tasklet |= intel_engine_needs_breadcrumb_tasklet(engine);
+ }
+
+ if (tasklet)
+ tasklet_hi_schedule(&engine->execlists.tasklet);
+}
+
+static u32
+gen11_gt_engine_identity(struct intel_gt *gt,
+ const unsigned int bank, const unsigned int bit)
+{
+ void __iomem * const regs = gt->uncore->regs;
+ u32 timeout_ts;
+ u32 ident;
+
+ lockdep_assert_held(&gt->irq_lock);
+
+ raw_reg_write(regs, GEN11_IIR_REG_SELECTOR(bank), BIT(bit));
+
+ /*
+ * NB: Specs do not specify how long to spin wait,
+ * so we do ~100us as an educated guess.
+ */
+ timeout_ts = (local_clock() >> 10) + 100;
+ do {
+ ident = raw_reg_read(regs, GEN11_INTR_IDENTITY_REG(bank));
+ } while (!(ident & GEN11_INTR_DATA_VALID) &&
+ !time_after32(local_clock() >> 10, timeout_ts));
+
+ if (unlikely(!(ident & GEN11_INTR_DATA_VALID))) {
+ DRM_ERROR("INTR_IDENTITY_REG%u:%u 0x%08x not valid!\n",
+ bank, bit, ident);
+ return 0;
+ }
+
+ raw_reg_write(regs, GEN11_INTR_IDENTITY_REG(bank),
+ GEN11_INTR_DATA_VALID);
+
+ return ident;
+}
+
+static void
+gen11_other_irq_handler(struct intel_gt *gt, const u8 instance,
+ const u16 iir)
+{
+ if (instance == OTHER_GUC_INSTANCE)
+ return guc_irq_handler(&gt->uc.guc, iir);
+
+ if (instance == OTHER_GTPM_INSTANCE)
+ return gen11_rps_irq_handler(gt, iir);
+
+ WARN_ONCE(1, "unhandled other interrupt instance=0x%x, iir=0x%x\n",
+ instance, iir);
+}
+
+static void
+gen11_engine_irq_handler(struct intel_gt *gt, const u8 class,
+ const u8 instance, const u16 iir)
+{
+ struct intel_engine_cs *engine;
+
+ if (instance <= MAX_ENGINE_INSTANCE)
+ engine = gt->engine_class[class][instance];
+ else
+ engine = NULL;
+
+ if (likely(engine))
+ return cs_irq_handler(engine, iir);
+
+ WARN_ONCE(1, "unhandled engine interrupt class=0x%x, instance=0x%x\n",
+ class, instance);
+}
+
+static void
+gen11_gt_identity_handler(struct intel_gt *gt, const u32 identity)
+{
+ const u8 class = GEN11_INTR_ENGINE_CLASS(identity);
+ const u8 instance = GEN11_INTR_ENGINE_INSTANCE(identity);
+ const u16 intr = GEN11_INTR_ENGINE_INTR(identity);
+
+ if (unlikely(!intr))
+ return;
+
+ if (class <= COPY_ENGINE_CLASS)
+ return gen11_engine_irq_handler(gt, class, instance, intr);
+
+ if (class == OTHER_CLASS)
+ return gen11_other_irq_handler(gt, instance, intr);
+
+ WARN_ONCE(1, "unknown interrupt class=0x%x, instance=0x%x, intr=0x%x\n",
+ class, instance, intr);
+}
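For reference, the identity-register layout assumed by the decode above; the exact bit positions are quoted from memory of the GEN11_INTR_* masks and should be treated as an assumption, with the authoritative definitions living in i915_reg.h:

/*
 *   bit  31     GEN11_INTR_DATA_VALID
 *   bits 25:20  engine instance -> GEN11_INTR_ENGINE_INSTANCE()
 *   bits 18:16  engine class    -> GEN11_INTR_ENGINE_CLASS()
 *   bits 15:0   interrupt bits  -> GEN11_INTR_ENGINE_INTR()
 */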
+
+static void
+gen11_gt_bank_handler(struct intel_gt *gt, const unsigned int bank)
+{
+ void __iomem * const regs = gt->uncore->regs;
+ unsigned long intr_dw;
+ unsigned int bit;
+
+ lockdep_assert_held(&gt->irq_lock);
+
+ intr_dw = raw_reg_read(regs, GEN11_GT_INTR_DW(bank));
+
+ for_each_set_bit(bit, &intr_dw, 32) {
+ const u32 ident = gen11_gt_engine_identity(gt, bank, bit);
+
+ gen11_gt_identity_handler(gt, ident);
+ }
+
+ /* The clear must come after the shared IIR has been serviced for the engine */
+ raw_reg_write(regs, GEN11_GT_INTR_DW(bank), intr_dw);
+}
+
+void gen11_gt_irq_handler(struct intel_gt *gt, const u32 master_ctl)
+{
+ unsigned int bank;
+
+ spin_lock(&gt->irq_lock);
+
+ for (bank = 0; bank < 2; bank++) {
+ if (master_ctl & GEN11_GT_DW_IRQ(bank))
+ gen11_gt_bank_handler(gt, bank);
+ }
+
+ spin_unlock(&gt->irq_lock);
+}
+
+bool gen11_gt_reset_one_iir(struct intel_gt *gt,
+ const unsigned int bank, const unsigned int bit)
+{
+ void __iomem * const regs = gt->uncore->regs;
+ u32 dw;
+
+ lockdep_assert_held(&gt->irq_lock);
+
+ dw = raw_reg_read(regs, GEN11_GT_INTR_DW(bank));
+ if (dw & BIT(bit)) {
+ /*
+ * According to the BSpec, DW_IIR bits cannot be cleared without
+ * first servicing the Selector & Shared IIR registers.
+ */
+ gen11_gt_engine_identity(gt, bank, bit);
+
+ /*
+ * We locked GT INT DW by reading it. If we want to (try
+ * to) recover from this successfully, we need to clear
+ * our bit, otherwise we are locking the register for
+ * everybody.
+ */
+ raw_reg_write(regs, GEN11_GT_INTR_DW(bank), BIT(bit));
+
+ return true;
+ }
+
+ return false;
+}
+
+void gen11_gt_irq_reset(struct intel_gt *gt)
+{
+ struct intel_uncore *uncore = gt->uncore;
+
+ /* Disable RCS, BCS, VCS and VECS class engine interrupts. */
+ intel_uncore_write(uncore, GEN11_RENDER_COPY_INTR_ENABLE, 0);
+ intel_uncore_write(uncore, GEN11_VCS_VECS_INTR_ENABLE, 0);
+
+ /* Restore the irq masks on RCS, BCS, VCS and VECS engines. */
+ intel_uncore_write(uncore, GEN11_RCS0_RSVD_INTR_MASK, ~0);
+ intel_uncore_write(uncore, GEN11_BCS_RSVD_INTR_MASK, ~0);
+ intel_uncore_write(uncore, GEN11_VCS0_VCS1_INTR_MASK, ~0);
+ intel_uncore_write(uncore, GEN11_VCS2_VCS3_INTR_MASK, ~0);
+ intel_uncore_write(uncore, GEN11_VECS0_VECS1_INTR_MASK, ~0);
+
+ intel_uncore_write(uncore, GEN11_GPM_WGBOXPERF_INTR_ENABLE, 0);
+ intel_uncore_write(uncore, GEN11_GPM_WGBOXPERF_INTR_MASK, ~0);
+ intel_uncore_write(uncore, GEN11_GUC_SG_INTR_ENABLE, 0);
+ intel_uncore_write(uncore, GEN11_GUC_SG_INTR_MASK, ~0);
+}
+
+void gen11_gt_irq_postinstall(struct intel_gt *gt)
+{
+ const u32 irqs = GT_RENDER_USER_INTERRUPT | GT_CONTEXT_SWITCH_INTERRUPT;
+ struct intel_uncore *uncore = gt->uncore;
+ const u32 dmask = irqs << 16 | irqs;
+ const u32 smask = irqs << 16;
+
+ BUILD_BUG_ON(irqs & 0xffff0000);
+
+ /* Enable RCS, BCS, VCS and VECS class interrupts. */
+ intel_uncore_write(uncore, GEN11_RENDER_COPY_INTR_ENABLE, dmask);
+ intel_uncore_write(uncore, GEN11_VCS_VECS_INTR_ENABLE, dmask);
+
+ /* Unmask irqs on RCS, BCS, VCS and VECS engines. */
+ intel_uncore_write(uncore, GEN11_RCS0_RSVD_INTR_MASK, ~smask);
+ intel_uncore_write(uncore, GEN11_BCS_RSVD_INTR_MASK, ~smask);
+ intel_uncore_write(uncore, GEN11_VCS0_VCS1_INTR_MASK, ~dmask);
+ intel_uncore_write(uncore, GEN11_VCS2_VCS3_INTR_MASK, ~dmask);
+ intel_uncore_write(uncore, GEN11_VECS0_VECS1_INTR_MASK, ~dmask);
+
+ /*
+ * RPS interrupts will get enabled/disabled on demand when RPS itself
+ * is enabled/disabled.
+ */
+ gt->pm_ier = 0x0;
+ gt->pm_imr = ~gt->pm_ier;
+ intel_uncore_write(uncore, GEN11_GPM_WGBOXPERF_INTR_ENABLE, 0);
+ intel_uncore_write(uncore, GEN11_GPM_WGBOXPERF_INTR_MASK, ~0);
+
+ /* Same thing for GuC interrupts */
+ intel_uncore_write(uncore, GEN11_GUC_SG_INTR_ENABLE, 0);
+ intel_uncore_write(uncore, GEN11_GUC_SG_INTR_MASK, ~0);
+}
+
+void gen5_gt_irq_handler(struct intel_gt *gt, u32 gt_iir)
+{
+ if (gt_iir & GT_RENDER_USER_INTERRUPT)
+ intel_engine_breadcrumbs_irq(gt->engine_class[RENDER_CLASS][0]);
+ if (gt_iir & ILK_BSD_USER_INTERRUPT)
+ intel_engine_breadcrumbs_irq(gt->engine_class[VIDEO_DECODE_CLASS][0]);
+}
+
+static void gen7_parity_error_irq_handler(struct intel_gt *gt, u32 iir)
+{
+ if (!HAS_L3_DPF(gt->i915))
+ return;
+
+ spin_lock(&gt->irq_lock);
+ gen5_gt_disable_irq(gt, GT_PARITY_ERROR(gt->i915));
+ spin_unlock(&gt->irq_lock);
+
+ if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1)
+ gt->i915->l3_parity.which_slice |= 1 << 1;
+
+ if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
+ gt->i915->l3_parity.which_slice |= 1 << 0;
+
+ schedule_work(&gt->i915->l3_parity.error_work);
+}
+
+void gen6_gt_irq_handler(struct intel_gt *gt, u32 gt_iir)
+{
+ if (gt_iir & GT_RENDER_USER_INTERRUPT)
+ intel_engine_breadcrumbs_irq(gt->engine_class[RENDER_CLASS][0]);
+ if (gt_iir & GT_BSD_USER_INTERRUPT)
+ intel_engine_breadcrumbs_irq(gt->engine_class[VIDEO_DECODE_CLASS][0]);
+ if (gt_iir & GT_BLT_USER_INTERRUPT)
+ intel_engine_breadcrumbs_irq(gt->engine_class[COPY_ENGINE_CLASS][0]);
+
+ if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
+ GT_BSD_CS_ERROR_INTERRUPT |
+ GT_RENDER_CS_MASTER_ERROR_INTERRUPT))
+ DRM_DEBUG("Command parser error, gt_iir 0x%08x\n", gt_iir);
+
+ if (gt_iir & GT_PARITY_ERROR(gt->i915))
+ gen7_parity_error_irq_handler(gt, gt_iir);
+}
+
+void gen8_gt_irq_ack(struct intel_gt *gt, u32 master_ctl, u32 gt_iir[4])
+{
+ void __iomem * const regs = gt->uncore->regs;
+
+ if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
+ gt_iir[0] = raw_reg_read(regs, GEN8_GT_IIR(0));
+ if (likely(gt_iir[0]))
+ raw_reg_write(regs, GEN8_GT_IIR(0), gt_iir[0]);
+ }
+
+ if (master_ctl & (GEN8_GT_VCS0_IRQ | GEN8_GT_VCS1_IRQ)) {
+ gt_iir[1] = raw_reg_read(regs, GEN8_GT_IIR(1));
+ if (likely(gt_iir[1]))
+ raw_reg_write(regs, GEN8_GT_IIR(1), gt_iir[1]);
+ }
+
+ if (master_ctl & (GEN8_GT_PM_IRQ | GEN8_GT_GUC_IRQ)) {
+ gt_iir[2] = raw_reg_read(regs, GEN8_GT_IIR(2));
+ if (likely(gt_iir[2]))
+ raw_reg_write(regs, GEN8_GT_IIR(2), gt_iir[2]);
+ }
+
+ if (master_ctl & GEN8_GT_VECS_IRQ) {
+ gt_iir[3] = raw_reg_read(regs, GEN8_GT_IIR(3));
+ if (likely(gt_iir[3]))
+ raw_reg_write(regs, GEN8_GT_IIR(3), gt_iir[3]);
+ }
+}
+
+void gen8_gt_irq_handler(struct intel_gt *gt, u32 master_ctl, u32 gt_iir[4])
+{
+ if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
+ cs_irq_handler(gt->engine_class[RENDER_CLASS][0],
+ gt_iir[0] >> GEN8_RCS_IRQ_SHIFT);
+ cs_irq_handler(gt->engine_class[COPY_ENGINE_CLASS][0],
+ gt_iir[0] >> GEN8_BCS_IRQ_SHIFT);
+ }
+
+ if (master_ctl & (GEN8_GT_VCS0_IRQ | GEN8_GT_VCS1_IRQ)) {
+ cs_irq_handler(gt->engine_class[VIDEO_DECODE_CLASS][0],
+ gt_iir[1] >> GEN8_VCS0_IRQ_SHIFT);
+ cs_irq_handler(gt->engine_class[VIDEO_DECODE_CLASS][1],
+ gt_iir[1] >> GEN8_VCS1_IRQ_SHIFT);
+ }
+
+ if (master_ctl & GEN8_GT_VECS_IRQ) {
+ cs_irq_handler(gt->engine_class[VIDEO_ENHANCEMENT_CLASS][0],
+ gt_iir[3] >> GEN8_VECS_IRQ_SHIFT);
+ }
+
+ if (master_ctl & (GEN8_GT_PM_IRQ | GEN8_GT_GUC_IRQ)) {
+ gen6_rps_irq_handler(gt->i915, gt_iir[2]);
+ guc_irq_handler(&gt->uc.guc, gt_iir[2] >> 16);
+ }
+}
+
+void gen8_gt_irq_reset(struct intel_gt *gt)
+{
+ struct intel_uncore *uncore = gt->uncore;
+
+ GEN8_IRQ_RESET_NDX(uncore, GT, 0);
+ GEN8_IRQ_RESET_NDX(uncore, GT, 1);
+ GEN8_IRQ_RESET_NDX(uncore, GT, 2);
+ GEN8_IRQ_RESET_NDX(uncore, GT, 3);
+}
+
+void gen8_gt_irq_postinstall(struct intel_gt *gt)
+{
+ struct intel_uncore *uncore = gt->uncore;
+
+ /* These are interrupts we'll toggle with the ring mask register */
+ u32 gt_interrupts[] = {
+ (GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
+ GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
+ GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT |
+ GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT),
+
+ (GT_RENDER_USER_INTERRUPT << GEN8_VCS0_IRQ_SHIFT |
+ GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS0_IRQ_SHIFT |
+ GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
+ GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT),
+
+ 0,
+
+ (GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT |
+ GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT)
+ };
+
+ gt->pm_ier = 0x0;
+ gt->pm_imr = ~gt->pm_ier;
+ GEN8_IRQ_INIT_NDX(uncore, GT, 0, ~gt_interrupts[0], gt_interrupts[0]);
+ GEN8_IRQ_INIT_NDX(uncore, GT, 1, ~gt_interrupts[1], gt_interrupts[1]);
+ /*
+ * RPS interrupts will get enabled/disabled on demand when RPS itself
+ * is enabled/disabled. The same will be the case for GuC interrupts.
+ */
+ GEN8_IRQ_INIT_NDX(uncore, GT, 2, gt->pm_imr, gt->pm_ier);
+ GEN8_IRQ_INIT_NDX(uncore, GT, 3, ~gt_interrupts[3], gt_interrupts[3]);
+}
+
+static void gen5_gt_update_irq(struct intel_gt *gt,
+ u32 interrupt_mask,
+ u32 enabled_irq_mask)
+{
+ lockdep_assert_held(&gt->irq_lock);
+
+ GEM_BUG_ON(enabled_irq_mask & ~interrupt_mask);
+
+ gt->gt_imr &= ~interrupt_mask;
+ gt->gt_imr |= (~enabled_irq_mask & interrupt_mask);
+ intel_uncore_write(gt->uncore, GTIMR, gt->gt_imr);
+}
+
+void gen5_gt_enable_irq(struct intel_gt *gt, u32 mask)
+{
+ gen5_gt_update_irq(gt, mask, mask);
+ intel_uncore_posting_read_fw(gt->uncore, GTIMR);
+}
+
+void gen5_gt_disable_irq(struct intel_gt *gt, u32 mask)
+{
+ gen5_gt_update_irq(gt, mask, 0);
+}
+
+void gen5_gt_irq_reset(struct intel_gt *gt)
+{
+ struct intel_uncore *uncore = gt->uncore;
+
+ GEN3_IRQ_RESET(uncore, GT);
+ if (INTEL_GEN(gt->i915) >= 6)
+ GEN3_IRQ_RESET(uncore, GEN6_PM);
+}
+
+void gen5_gt_irq_postinstall(struct intel_gt *gt)
+{
+ struct intel_uncore *uncore = gt->uncore;
+ u32 pm_irqs = 0;
+ u32 gt_irqs = 0;
+
+ gt->gt_imr = ~0;
+ if (HAS_L3_DPF(gt->i915)) {
+ /* L3 parity interrupt is always unmasked. */
+ gt->gt_imr = ~GT_PARITY_ERROR(gt->i915);
+ gt_irqs |= GT_PARITY_ERROR(gt->i915);
+ }
+
+ gt_irqs |= GT_RENDER_USER_INTERRUPT;
+ if (IS_GEN(gt->i915, 5))
+ gt_irqs |= ILK_BSD_USER_INTERRUPT;
+ else
+ gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
+
+ GEN3_IRQ_INIT(uncore, GT, gt->gt_imr, gt_irqs);
+
+ if (INTEL_GEN(gt->i915) >= 6) {
+ /*
+ * RPS interrupts will get enabled/disabled on demand when RPS
+ * itself is enabled/disabled.
+ */
+ if (HAS_ENGINE(gt->i915, VECS0)) {
+ pm_irqs |= PM_VEBOX_USER_INTERRUPT;
+ gt->pm_ier |= PM_VEBOX_USER_INTERRUPT;
+ }
+
+ gt->pm_imr = 0xffffffff;
+ GEN3_IRQ_INIT(uncore, GEN6_PM, gt->pm_imr, pm_irqs);
+ }
+}
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_irq.h b/drivers/gpu/drm/i915/gt/intel_gt_irq.h
new file mode 100644
index 000000000000..8f37593712c9
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_gt_irq.h
@@ -0,0 +1,44 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef INTEL_GT_IRQ_H
+#define INTEL_GT_IRQ_H
+
+#include <linux/types.h>
+
+struct intel_gt;
+
+#define GEN8_GT_IRQS (GEN8_GT_RCS_IRQ | \
+ GEN8_GT_BCS_IRQ | \
+ GEN8_GT_VCS0_IRQ | \
+ GEN8_GT_VCS1_IRQ | \
+ GEN8_GT_VECS_IRQ | \
+ GEN8_GT_PM_IRQ | \
+ GEN8_GT_GUC_IRQ)
+
+void gen11_gt_irq_reset(struct intel_gt *gt);
+void gen11_gt_irq_postinstall(struct intel_gt *gt);
+void gen11_gt_irq_handler(struct intel_gt *gt, const u32 master_ctl);
+
+bool gen11_gt_reset_one_iir(struct intel_gt *gt,
+ const unsigned int bank,
+ const unsigned int bit);
+
+void gen5_gt_irq_handler(struct intel_gt *gt, u32 gt_iir);
+
+void gen5_gt_irq_postinstall(struct intel_gt *gt);
+void gen5_gt_irq_reset(struct intel_gt *gt);
+void gen5_gt_disable_irq(struct intel_gt *gt, u32 mask);
+void gen5_gt_enable_irq(struct intel_gt *gt, u32 mask);
+
+void gen6_gt_irq_handler(struct intel_gt *gt, u32 gt_iir);
+
+void gen8_gt_irq_ack(struct intel_gt *gt, u32 master_ctl, u32 gt_iir[4]);
+void gen8_gt_irq_reset(struct intel_gt *gt);
+void gen8_gt_irq_handler(struct intel_gt *gt, u32 master_ctl, u32 gt_iir[4]);
+void gen8_gt_irq_postinstall(struct intel_gt *gt);
+
+#endif /* INTEL_GT_IRQ_H */
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.c b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
index 9f8f7f54191f..1363e069ec83 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
@@ -5,7 +5,9 @@
*/
#include "i915_drv.h"
+#include "i915_params.h"
#include "intel_engine_pm.h"
+#include "intel_gt.h"
#include "intel_gt_pm.h"
#include "intel_pm.h"
#include "intel_wakeref.h"
@@ -15,10 +17,10 @@ static void pm_notify(struct drm_i915_private *i915, int state)
blocking_notifier_call_chain(&i915->gt.pm_notifications, state, i915);
}
-static int intel_gt_unpark(struct intel_wakeref *wf)
+static int __gt_unpark(struct intel_wakeref *wf)
{
- struct drm_i915_private *i915 =
- container_of(wf, typeof(*i915), gt.wakeref);
+ struct intel_gt *gt = container_of(wf, typeof(*gt), wakeref);
+ struct drm_i915_private *i915 = gt->i915;
GEM_TRACE("\n");
@@ -33,8 +35,8 @@ static int intel_gt_unpark(struct intel_wakeref *wf)
* Work around it by grabbing a GT IRQ power domain whilst there is any
* GT activity, preventing any DC state transitions.
*/
- i915->gt.awake = intel_display_power_get(i915, POWER_DOMAIN_GT_IRQ);
- GEM_BUG_ON(!i915->gt.awake);
+ gt->awake = intel_display_power_get(i915, POWER_DOMAIN_GT_IRQ);
+ GEM_BUG_ON(!gt->awake);
intel_enable_gt_powersave(i915);
@@ -44,19 +46,14 @@ static int intel_gt_unpark(struct intel_wakeref *wf)
i915_pmu_gt_unparked(i915);
- i915_queue_hangcheck(i915);
+ intel_gt_queue_hangcheck(gt);
pm_notify(i915, INTEL_GT_UNPARK);
return 0;
}
-void intel_gt_pm_get(struct drm_i915_private *i915)
-{
- intel_wakeref_get(&i915->runtime_pm, &i915->gt.wakeref, intel_gt_unpark);
-}
-
-static int intel_gt_park(struct intel_wakeref *wf)
+static int __gt_park(struct intel_wakeref *wf)
{
struct drm_i915_private *i915 =
container_of(wf, typeof(*i915), gt.wakeref);
@@ -70,34 +67,39 @@ static int intel_gt_park(struct intel_wakeref *wf)
if (INTEL_GEN(i915) >= 6)
gen6_rps_idle(i915);
+ /* Everything switched off, flush any residual interrupt just in case */
+ intel_synchronize_irq(i915);
+
GEM_BUG_ON(!wakeref);
intel_display_power_put(i915, POWER_DOMAIN_GT_IRQ, wakeref);
return 0;
}
-void intel_gt_pm_put(struct drm_i915_private *i915)
-{
- intel_wakeref_put(&i915->runtime_pm, &i915->gt.wakeref, intel_gt_park);
-}
+static const struct intel_wakeref_ops wf_ops = {
+ .get = __gt_unpark,
+ .put = __gt_park,
+ .flags = INTEL_WAKEREF_PUT_ASYNC,
+};
-void intel_gt_pm_init(struct drm_i915_private *i915)
+void intel_gt_pm_init_early(struct intel_gt *gt)
{
- intel_wakeref_init(&i915->gt.wakeref);
- BLOCKING_INIT_NOTIFIER_HEAD(&i915->gt.pm_notifications);
+ intel_wakeref_init(&gt->wakeref, &gt->i915->runtime_pm, &wf_ops);
+
+ BLOCKING_INIT_NOTIFIER_HEAD(&gt->pm_notifications);
}
-static bool reset_engines(struct drm_i915_private *i915)
+static bool reset_engines(struct intel_gt *gt)
{
- if (INTEL_INFO(i915)->gpu_reset_clobbers_display)
+ if (INTEL_INFO(gt->i915)->gpu_reset_clobbers_display)
return false;
- return intel_gpu_reset(i915, ALL_ENGINES) == 0;
+ return __intel_gt_reset(gt, ALL_ENGINES) == 0;
}
/**
* intel_gt_sanitize: called after the GPU has lost power
- * @i915: the i915 device
+ * @gt: the i915 GT container
* @force: ignore a failed reset and sanitize engine state anyway
*
* Anytime we reset the GPU, either with an explicit GPU reset or through a
@@ -105,21 +107,23 @@ static bool reset_engines(struct drm_i915_private *i915)
* to match. Note that calling intel_gt_sanitize() if the GPU has not
* been reset results in much confusion!
*/
-void intel_gt_sanitize(struct drm_i915_private *i915, bool force)
+void intel_gt_sanitize(struct intel_gt *gt, bool force)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
GEM_TRACE("\n");
- if (!reset_engines(i915) && !force)
+ intel_uc_sanitize(&gt->uc);
+
+ if (!reset_engines(gt) && !force)
return;
- for_each_engine(engine, i915, id)
- intel_engine_reset(engine, false);
+ for_each_engine(engine, gt->i915, id)
+ __intel_engine_reset(engine, false);
}
-int intel_gt_resume(struct drm_i915_private *i915)
+int intel_gt_resume(struct intel_gt *gt)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
@@ -131,8 +135,8 @@ int intel_gt_resume(struct drm_i915_private *i915)
* Only the kernel contexts should remain pinned over suspend,
* allowing us to fixup the user contexts on their first pin.
*/
- intel_gt_pm_get(i915);
- for_each_engine(engine, i915, id) {
+ intel_gt_pm_get(gt);
+ for_each_engine(engine, gt->i915, id) {
struct intel_context *ce;
intel_engine_pm_get(engine);
@@ -141,22 +145,30 @@ int intel_gt_resume(struct drm_i915_private *i915)
if (ce)
ce->ops->reset(ce);
- ce = engine->preempt_context;
- if (ce)
- ce->ops->reset(ce);
-
engine->serial++; /* kernel context lost */
err = engine->resume(engine);
intel_engine_pm_put(engine);
if (err) {
- dev_err(i915->drm.dev,
+ dev_err(gt->i915->drm.dev,
"Failed to restart %s (%d)\n",
engine->name, err);
break;
}
}
- intel_gt_pm_put(i915);
+ intel_gt_pm_put(gt);
return err;
}
+
+void intel_gt_runtime_suspend(struct intel_gt *gt)
+{
+ intel_uc_runtime_suspend(&gt->uc);
+}
+
+int intel_gt_runtime_resume(struct intel_gt *gt)
+{
+ intel_gt_init_swizzling(gt);
+
+ return intel_uc_runtime_resume(&gt->uc);
+}
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.h b/drivers/gpu/drm/i915/gt/intel_gt_pm.h
index 53f342b20181..fb39d99cd6ee 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_pm.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.h
@@ -9,19 +9,44 @@
#include <linux/types.h>
-struct drm_i915_private;
+#include "intel_gt_types.h"
+#include "intel_wakeref.h"
enum {
INTEL_GT_UNPARK,
INTEL_GT_PARK,
};
-void intel_gt_pm_get(struct drm_i915_private *i915);
-void intel_gt_pm_put(struct drm_i915_private *i915);
-
-void intel_gt_pm_init(struct drm_i915_private *i915);
-
-void intel_gt_sanitize(struct drm_i915_private *i915, bool force);
-int intel_gt_resume(struct drm_i915_private *i915);
+static inline bool intel_gt_pm_is_awake(const struct intel_gt *gt)
+{
+ return intel_wakeref_is_active(&gt->wakeref);
+}
+
+static inline void intel_gt_pm_get(struct intel_gt *gt)
+{
+ intel_wakeref_get(&gt->wakeref);
+}
+
+static inline bool intel_gt_pm_get_if_awake(struct intel_gt *gt)
+{
+ return intel_wakeref_get_if_active(&gt->wakeref);
+}
+
+static inline void intel_gt_pm_put(struct intel_gt *gt)
+{
+ intel_wakeref_put(&gt->wakeref);
+}
+
+static inline int intel_gt_pm_wait_for_idle(struct intel_gt *gt)
+{
+ return intel_wakeref_wait_for_idle(&gt->wakeref);
+}
+
+void intel_gt_pm_init_early(struct intel_gt *gt);
+
+void intel_gt_sanitize(struct intel_gt *gt, bool force);
+int intel_gt_resume(struct intel_gt *gt);
+void intel_gt_runtime_suspend(struct intel_gt *gt);
+int intel_gt_runtime_resume(struct intel_gt *gt);
#endif /* INTEL_GT_PM_H */
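A minimal usage sketch of the wakeref wrappers above (hypothetical caller):

	if (!intel_gt_pm_get_if_awake(gt))
		return; /* GT already parked; nothing to do */

	/* ... safe to touch GT hardware here ... */

	intel_gt_pm_put(gt); /* may park asynchronously (PUT_ASYNC) */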
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm_irq.c b/drivers/gpu/drm/i915/gt/intel_gt_pm_irq.c
new file mode 100644
index 000000000000..babe866126d7
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm_irq.c
@@ -0,0 +1,109 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include "i915_drv.h"
+#include "i915_reg.h"
+#include "intel_gt.h"
+#include "intel_gt_irq.h"
+#include "intel_gt_pm_irq.h"
+
+static void write_pm_imr(struct intel_gt *gt)
+{
+ struct drm_i915_private *i915 = gt->i915;
+ struct intel_uncore *uncore = gt->uncore;
+ u32 mask = gt->pm_imr;
+ i915_reg_t reg;
+
+ if (INTEL_GEN(i915) >= 11) {
+ reg = GEN11_GPM_WGBOXPERF_INTR_MASK;
+ mask <<= 16; /* pm is in upper half */
+ } else if (INTEL_GEN(i915) >= 8) {
+ reg = GEN8_GT_IMR(2);
+ } else {
+ reg = GEN6_PMIMR;
+ }
+
+ intel_uncore_write(uncore, reg, mask);
+}
+
+static void gen6_gt_pm_update_irq(struct intel_gt *gt,
+ u32 interrupt_mask,
+ u32 enabled_irq_mask)
+{
+ u32 new_val;
+
+ WARN_ON(enabled_irq_mask & ~interrupt_mask);
+
+ lockdep_assert_held(&gt->irq_lock);
+
+ new_val = gt->pm_imr;
+ new_val &= ~interrupt_mask;
+ new_val |= ~enabled_irq_mask & interrupt_mask;
+
+ if (new_val != gt->pm_imr) {
+ gt->pm_imr = new_val;
+ write_pm_imr(gt);
+ }
+}
+
+void gen6_gt_pm_unmask_irq(struct intel_gt *gt, u32 mask)
+{
+ gen6_gt_pm_update_irq(gt, mask, mask);
+}
+
+void gen6_gt_pm_mask_irq(struct intel_gt *gt, u32 mask)
+{
+ gen6_gt_pm_update_irq(gt, mask, 0);
+}
+
+void gen6_gt_pm_reset_iir(struct intel_gt *gt, u32 reset_mask)
+{
+ struct intel_uncore *uncore = gt->uncore;
+ i915_reg_t reg = INTEL_GEN(gt->i915) >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR;
+
+ lockdep_assert_held(&gt->irq_lock);
+
+ intel_uncore_write(uncore, reg, reset_mask);
+ intel_uncore_write(uncore, reg, reset_mask);
+ intel_uncore_posting_read(uncore, reg);
+}
+
+static void write_pm_ier(struct intel_gt *gt)
+{
+ struct drm_i915_private *i915 = gt->i915;
+ struct intel_uncore *uncore = gt->uncore;
+ u32 mask = gt->pm_ier;
+ i915_reg_t reg;
+
+ if (INTEL_GEN(i915) >= 11) {
+ reg = GEN11_GPM_WGBOXPERF_INTR_ENABLE;
+ mask <<= 16; /* pm is in upper half */
+ } else if (INTEL_GEN(i915) >= 8) {
+ reg = GEN8_GT_IER(2);
+ } else {
+ reg = GEN6_PMIER;
+ }
+
+ intel_uncore_write(uncore, reg, mask);
+}
+
+void gen6_gt_pm_enable_irq(struct intel_gt *gt, u32 enable_mask)
+{
+ lockdep_assert_held(&gt->irq_lock);
+
+ gt->pm_ier |= enable_mask;
+ write_pm_ier(gt);
+ gen6_gt_pm_unmask_irq(gt, enable_mask);
+}
+
+void gen6_gt_pm_disable_irq(struct intel_gt *gt, u32 disable_mask)
+{
+ lockdep_assert_held(&gt->irq_lock);
+
+ gt->pm_ier &= ~disable_mask;
+ gen6_gt_pm_mask_irq(gt, disable_mask);
+ write_pm_ier(gt);
+}
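A hypothetical caller sketch: the mask/enable helpers expect gt->irq_lock to be held, as the lockdep assertions above enforce (the mask value is illustrative):

	spin_lock_irq(&gt->irq_lock);
	gen6_gt_pm_enable_irq(gt, GEN6_PM_RPS_EVENTS);
	spin_unlock_irq(&gt->irq_lock);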
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm_irq.h b/drivers/gpu/drm/i915/gt/intel_gt_pm_irq.h
new file mode 100644
index 000000000000..b29816a04809
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm_irq.h
@@ -0,0 +1,22 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef INTEL_GT_PM_IRQ_H
+#define INTEL_GT_PM_IRQ_H
+
+#include <linux/types.h>
+
+struct intel_gt;
+
+void gen6_gt_pm_unmask_irq(struct intel_gt *gt, u32 mask);
+void gen6_gt_pm_mask_irq(struct intel_gt *gt, u32 mask);
+
+void gen6_gt_pm_enable_irq(struct intel_gt *gt, u32 enable_mask);
+void gen6_gt_pm_disable_irq(struct intel_gt *gt, u32 disable_mask);
+
+void gen6_gt_pm_reset_iir(struct intel_gt *gt, u32 reset_mask);
+
+#endif /* INTEL_GT_PM_IRQ_H */
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_types.h b/drivers/gpu/drm/i915/gt/intel_gt_types.h
new file mode 100644
index 000000000000..dc295c196d11
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_gt_types.h
@@ -0,0 +1,102 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_GT_TYPES__
+#define __INTEL_GT_TYPES__
+
+#include <linux/ktime.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/notifier.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+
+#include "uc/intel_uc.h"
+
+#include "i915_vma.h"
+#include "intel_engine_types.h"
+#include "intel_reset_types.h"
+#include "intel_wakeref.h"
+
+struct drm_i915_private;
+struct i915_ggtt;
+struct intel_engine_cs;
+struct intel_uncore;
+
+struct intel_hangcheck {
+ /* For hangcheck timer */
+#define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */
+#define DRM_I915_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)
+
+ struct delayed_work work;
+};
+
+struct intel_gt {
+ struct drm_i915_private *i915;
+ struct intel_uncore *uncore;
+ struct i915_ggtt *ggtt;
+
+ struct intel_uc uc;
+
+ struct intel_gt_timelines {
+ spinlock_t lock; /* protects active_list */
+ struct list_head active_list;
+
+ /* Pack multiple timelines' seqnos into the same page */
+ spinlock_t hwsp_lock;
+ struct list_head hwsp_free_list;
+ } timelines;
+
+ struct intel_wakeref wakeref;
+
+ struct list_head closed_vma;
+ spinlock_t closed_lock; /* guards the list of closed_vma */
+
+ struct intel_hangcheck hangcheck;
+ struct intel_reset reset;
+
+ /**
+ * Is the GPU currently considered idle, or busy executing
+ * userspace requests? Whilst idle, we allow runtime power
+ * management to power down the hardware and display clocks.
+ * In order to reduce the effect on performance, there
+ * is a slight delay before we do so.
+ */
+ intel_wakeref_t awake;
+
+ struct blocking_notifier_head pm_notifications;
+
+ ktime_t last_init_time;
+
+ struct i915_vma *scratch;
+
+ spinlock_t irq_lock;
+ u32 gt_imr;
+ u32 pm_ier;
+ u32 pm_imr;
+
+ u32 pm_guc_events;
+
+ struct intel_engine_cs *engine[I915_NUM_ENGINES];
+ struct intel_engine_cs *engine_class[MAX_ENGINE_CLASS + 1]
+ [MAX_ENGINE_INSTANCE + 1];
+};
+
+enum intel_gt_scratch_field {
+ /* 8 bytes */
+ INTEL_GT_SCRATCH_FIELD_DEFAULT = 0,
+
+ /* 8 bytes */
+ INTEL_GT_SCRATCH_FIELD_CLEAR_SLM_WA = 128,
+
+ /* 8 bytes */
+ INTEL_GT_SCRATCH_FIELD_RENDER_FLUSH = 128,
+
+ /* 8 bytes */
+ INTEL_GT_SCRATCH_FIELD_COHERENTL3_WA = 256,
+};
+
+#endif /* __INTEL_GT_TYPES__ */
diff --git a/drivers/gpu/drm/i915/gt/intel_hangcheck.c b/drivers/gpu/drm/i915/gt/intel_hangcheck.c
index 6bcfa6456c45..05d042cdefe2 100644
--- a/drivers/gpu/drm/i915/gt/intel_hangcheck.c
+++ b/drivers/gpu/drm/i915/gt/intel_hangcheck.c
@@ -22,8 +22,10 @@
*
*/
-#include "intel_reset.h"
#include "i915_drv.h"
+#include "intel_engine.h"
+#include "intel_gt.h"
+#include "intel_reset.h"
struct hangcheck {
u64 acthd;
@@ -57,9 +59,6 @@ static bool subunits_stuck(struct intel_engine_cs *engine)
int slice;
int subslice;
- if (engine->id != RCS0)
- return true;
-
intel_engine_get_instdone(engine, &instdone);
/* There might be unstable subunit states even when
@@ -103,7 +102,6 @@ head_stuck(struct intel_engine_cs *engine, u64 acthd)
static enum intel_engine_hangcheck_action
engine_stuck(struct intel_engine_cs *engine, u64 acthd)
{
- struct drm_i915_private *dev_priv = engine->i915;
enum intel_engine_hangcheck_action ha;
u32 tmp;
@@ -111,7 +109,7 @@ engine_stuck(struct intel_engine_cs *engine, u64 acthd)
if (ha != ENGINE_DEAD)
return ha;
- if (IS_GEN(dev_priv, 2))
+ if (IS_GEN(engine->i915, 2))
return ENGINE_DEAD;
/* Is the chip hanging on a WAIT_FOR_EVENT?
@@ -121,8 +119,8 @@ engine_stuck(struct intel_engine_cs *engine, u64 acthd)
*/
tmp = ENGINE_READ(engine, RING_CTL);
if (tmp & RING_WAIT) {
- i915_handle_error(dev_priv, engine->mask, 0,
- "stuck wait on %s", engine->name);
+ intel_gt_handle_error(engine->gt, engine->mask, 0,
+ "stuck wait on %s", engine->name);
ENGINE_WRITE(engine, RING_CTL, tmp);
return ENGINE_WAIT_KICK;
}
@@ -222,7 +220,7 @@ static void hangcheck_accumulate_sample(struct intel_engine_cs *engine,
I915_ENGINE_WEDGED_TIMEOUT);
}
-static void hangcheck_declare_hang(struct drm_i915_private *i915,
+static void hangcheck_declare_hang(struct intel_gt *gt,
intel_engine_mask_t hung,
intel_engine_mask_t stuck)
{
@@ -238,12 +236,12 @@ static void hangcheck_declare_hang(struct drm_i915_private *i915,
hung &= ~stuck;
len = scnprintf(msg, sizeof(msg),
"%s on ", stuck == hung ? "no progress" : "hang");
- for_each_engine_masked(engine, i915, hung, tmp)
+ for_each_engine_masked(engine, gt->i915, hung, tmp)
len += scnprintf(msg + len, sizeof(msg) - len,
"%s, ", engine->name);
msg[len-2] = '\0';
- return i915_handle_error(i915, hung, I915_ERROR_CAPTURE, "%s", msg);
+ return intel_gt_handle_error(gt, hung, I915_ERROR_CAPTURE, "%s", msg);
}
/*
@@ -254,11 +252,10 @@ static void hangcheck_declare_hang(struct drm_i915_private *i915,
* we kick the ring. If we see no progress on three subsequent calls
* we assume chip is wedged and try to fix it by resetting the chip.
*/
-static void i915_hangcheck_elapsed(struct work_struct *work)
+static void hangcheck_elapsed(struct work_struct *work)
{
- struct drm_i915_private *dev_priv =
- container_of(work, typeof(*dev_priv),
- gpu_error.hangcheck_work.work);
+ struct intel_gt *gt =
+ container_of(work, typeof(*gt), hangcheck.work.work);
intel_engine_mask_t hung = 0, stuck = 0, wedged = 0;
struct intel_engine_cs *engine;
enum intel_engine_id id;
@@ -267,13 +264,13 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
if (!i915_modparams.enable_hangcheck)
return;
- if (!READ_ONCE(dev_priv->gt.awake))
+ if (!READ_ONCE(gt->awake))
return;
- if (i915_terminally_wedged(dev_priv))
+ if (intel_gt_is_wedged(gt))
return;
- wakeref = intel_runtime_pm_get_if_in_use(&dev_priv->runtime_pm);
+ wakeref = intel_runtime_pm_get_if_in_use(&gt->i915->runtime_pm);
if (!wakeref)
return;
@@ -281,9 +278,9 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
* periodically arm the mmio checker to see if we are triggering
* any invalid access.
*/
- intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore);
+ intel_uncore_arm_unclaimed_mmio_detection(gt->uncore);
- for_each_engine(engine, dev_priv, id) {
+ for_each_engine(engine, gt->i915, id) {
struct hangcheck hc;
intel_engine_signal_breadcrumbs(engine);
@@ -305,7 +302,7 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
if (GEM_SHOW_DEBUG() && (hung | stuck)) {
struct drm_printer p = drm_debug_printer("hangcheck");
- for_each_engine(engine, dev_priv, id) {
+ for_each_engine(engine, gt->i915, id) {
if (intel_engine_is_idle(engine))
continue;
@@ -314,20 +311,37 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
}
if (wedged) {
- dev_err(dev_priv->drm.dev,
+ dev_err(gt->i915->drm.dev,
"GPU recovery timed out,"
" cancelling all in-flight rendering.\n");
GEM_TRACE_DUMP();
- i915_gem_set_wedged(dev_priv);
+ intel_gt_set_wedged(gt);
}
if (hung)
- hangcheck_declare_hang(dev_priv, hung, stuck);
+ hangcheck_declare_hang(gt, hung, stuck);
- intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
+ intel_runtime_pm_put(&gt->i915->runtime_pm, wakeref);
/* Reset timer in case GPU hangs without another request being added */
- i915_queue_hangcheck(dev_priv);
+ intel_gt_queue_hangcheck(gt);
+}
+
+void intel_gt_queue_hangcheck(struct intel_gt *gt)
+{
+ unsigned long delay;
+
+ if (unlikely(!i915_modparams.enable_hangcheck))
+ return;
+
+ /*
+ * Don't continually defer the hangcheck so that it is always run at
+ * least once after work has been scheduled on any ring. Otherwise,
+ * we will ignore a hung ring if a second ring is kept busy.
+ */
+
+ delay = round_jiffies_up_relative(DRM_I915_HANGCHECK_JIFFIES);
+ queue_delayed_work(system_long_wq, &gt->hangcheck.work, delay);
}
void intel_engine_init_hangcheck(struct intel_engine_cs *engine)
@@ -336,10 +350,9 @@ void intel_engine_init_hangcheck(struct intel_engine_cs *engine)
engine->hangcheck.action_timestamp = jiffies;
}
-void intel_hangcheck_init(struct drm_i915_private *i915)
+void intel_gt_init_hangcheck(struct intel_gt *gt)
{
- INIT_DELAYED_WORK(&i915->gpu_error.hangcheck_work,
- i915_hangcheck_elapsed);
+ INIT_DELAYED_WORK(&gt->hangcheck.work, hangcheck_elapsed);
}
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
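For reference, the delay computed by intel_gt_queue_hangcheck() is chosen to batch with other timers; a worked example of the arithmetic, assuming HZ=250:

	/*
	 * DRM_I915_HANGCHECK_JIFFIES = msecs_to_jiffies(1500) = 375 jiffies (1.5s)
	 * round_jiffies_up_relative(375) rounds the expiry up to the next whole
	 * second, so periodic hangcheck wakeups on system_long_wq coalesce with
	 * other second-aligned timers instead of firing at arbitrary offsets.
	 */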
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
index 82b7ace62d97..d42584439f51 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -136,9 +136,12 @@
#include "gem/i915_gem_context.h"
#include "i915_drv.h"
-#include "i915_gem_render_state.h"
+#include "i915_perf.h"
+#include "i915_trace.h"
#include "i915_vgpu.h"
#include "intel_engine_pm.h"
+#include "intel_gt.h"
+#include "intel_gt_pm.h"
#include "intel_lrc_reg.h"
#include "intel_mocs.h"
#include "intel_reset.h"
@@ -161,6 +164,15 @@
#define GEN8_CTX_STATUS_COMPLETED_MASK \
(GEN8_CTX_STATUS_COMPLETE | GEN8_CTX_STATUS_PREEMPTED)
+#define CTX_DESC_FORCE_RESTORE BIT_ULL(2)
+
+#define GEN12_CTX_STATUS_SWITCHED_TO_NEW_QUEUE (0x1) /* lower csb dword */
+#define GEN12_CTX_SWITCH_DETAIL(csb_dw) ((csb_dw) & 0xF) /* upper csb dword */
+#define GEN12_CSB_SW_CTX_ID_MASK GENMASK(25, 15)
+#define GEN12_IDLE_CTX_ID 0x7FF
+#define GEN12_CSB_CTX_VALID(csb_dw) \
+ (FIELD_GET(GEN12_CSB_SW_CTX_ID_MASK, csb_dw) != GEN12_IDLE_CTX_ID)
+
/* Typical size of the average request (2 pipecontrols and a MI_BB) */
#define EXECLISTS_REQUEST_SIZE 64 /* bytes */
#define WA_TAIL_DWORDS 2
@@ -214,13 +226,34 @@ static struct virtual_engine *to_virtual_engine(struct intel_engine_cs *engine)
return container_of(engine, struct virtual_engine, base);
}
-static int execlists_context_deferred_alloc(struct intel_context *ce,
- struct intel_engine_cs *engine);
+static int __execlists_context_alloc(struct intel_context *ce,
+ struct intel_engine_cs *engine);
+
static void execlists_init_reg_state(u32 *reg_state,
struct intel_context *ce,
struct intel_engine_cs *engine,
struct intel_ring *ring);
+static inline u32 intel_hws_preempt_address(struct intel_engine_cs *engine)
+{
+ return (i915_ggtt_offset(engine->status_page.vma) +
+ I915_GEM_HWS_PREEMPT_ADDR);
+}
+
+static inline void
+ring_set_paused(const struct intel_engine_cs *engine, int state)
+{
+ /*
+ * We inspect HWS_PREEMPT with a semaphore inside
+ * engine->emit_fini_breadcrumb. If the dword is true,
+ * the ring is paused as the semaphore will busywait
+ * until the dword is false.
+ */
+ engine->status_page.addr[I915_GEM_HWS_PREEMPT] = state;
+ if (state)
+ wmb();
+}
+
static inline struct i915_priolist *to_priolist(struct rb_node *rb)
{
return rb_entry(rb, struct i915_priolist, node);
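ring_set_paused() above is the CPU half of the new busywait mechanism: every fini breadcrumb now ends with an MI_SEMAPHORE_WAIT polling the same HWS_PREEMPT dword (see emit_preempt_busywait() later in this diff). A sketch of the pause/unpause flow as assumed by the dequeue and CSB paths below:

	ring_set_paused(engine, 1);		/* breadcrumb semaphores now busywait */
	__unwind_incomplete_requests(engine);	/* RING_HEAD cannot advance past them */
	/* ... rebuild execlists->pending and rewrite ELSP ... */
	/* then, once the HW acks the submission (CSB_PROMOTE in process_csb): */
	ring_set_paused(engine, 0);		/* release the spinning breadcrumbs */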
@@ -236,6 +269,17 @@ static int effective_prio(const struct i915_request *rq)
int prio = rq_prio(rq);
/*
+ * If this request is special and must not be interrupted at any
+ * cost, so be it. Note we are only checking the most recent request
+ * in the context and so may be masking an earlier vip request. It
+ * is hoped that under the conditions where nopreempt is used, this
+ * will not matter (i.e. all requests to that context will be
+ * nopreempt for as long as desired).
+ */
+ if (i915_request_has_nopreempt(rq))
+ prio = I915_PRIORITY_UNPREEMPTABLE;
+
+ /*
* On unwinding the active request, we give it a priority bump
* if it has completed waiting on any semaphore. If we know that
* the request has already started, we can prevent an unwanted
@@ -245,6 +289,7 @@ static int effective_prio(const struct i915_request *rq)
prio |= I915_PRIORITY_NOSEMAPHORE;
/* Restrict mere WAIT boosts from triggering preemption */
+ BUILD_BUG_ON(__NO_PREEMPTION & ~I915_PRIORITY_MASK); /* only internal */
return prio | __NO_PREEMPTION;
}
@@ -271,10 +316,7 @@ static inline bool need_preempt(const struct intel_engine_cs *engine,
{
int last_prio;
- if (!engine->preempt_context)
- return false;
-
- if (i915_request_completed(rq))
+ if (!intel_engine_has_semaphores(engine))
return false;
/*
@@ -338,9 +380,6 @@ __maybe_unused static inline bool
assert_priority_queue(const struct i915_request *prev,
const struct i915_request *next)
{
- const struct intel_engine_execlists *execlists =
- &prev->engine->execlists;
-
/*
* Without preemption, the prev may refer to the still active element
* which we refuse to let go.
@@ -348,7 +387,7 @@ assert_priority_queue(const struct i915_request *prev,
* Even with preemption, there are times when we think it is better not
* to preempt and leave an ostensibly lower priority request in flight.
*/
- if (port_request(execlists->port) == prev)
+ if (i915_request_is_active(prev))
return true;
return rq_prio(prev) >= rq_prio(next);
@@ -389,13 +428,17 @@ lrc_descriptor(struct intel_context *ce, struct intel_engine_cs *engine)
BUILD_BUG_ON(MAX_CONTEXT_HW_ID > (BIT(GEN8_CTX_ID_WIDTH)));
BUILD_BUG_ON(GEN11_MAX_CONTEXT_HW_ID > (BIT(GEN11_SW_CTX_ID_WIDTH)));
- desc = ctx->desc_template; /* bits 0-11 */
- GEM_BUG_ON(desc & GENMASK_ULL(63, 12));
+ desc = INTEL_LEGACY_32B_CONTEXT;
+ if (i915_vm_is_4lvl(ce->vm))
+ desc = INTEL_LEGACY_64B_CONTEXT;
+ desc <<= GEN8_CTX_ADDRESSING_MODE_SHIFT;
+
+ desc |= GEN8_CTX_VALID | GEN8_CTX_PRIVILEGE;
+ if (IS_GEN(engine->i915, 8))
+ desc |= GEN8_CTX_L3LLC_COHERENT;
desc |= i915_ggtt_offset(ce->state) + LRC_HEADER_PAGES * PAGE_SIZE;
/* bits 12-31 */
- GEM_BUG_ON(desc & GENMASK_ULL(63, 32));
-
/*
* The following 32 bits are copied into the OA reports (dword 2).
* Consider updating oa_get_render_ctx_id in i915_perf.c when changing
@@ -442,13 +485,11 @@ __unwind_incomplete_requests(struct intel_engine_cs *engine)
struct intel_engine_cs *owner;
if (i915_request_completed(rq))
- break;
+ continue; /* XXX */
__i915_request_unsubmit(rq);
unwind_wa_tail(rq);
- GEM_BUG_ON(rq->hw_context->inflight);
-
/*
* Push the request back into the queue for later resubmission.
* If this request is not native to this physical engine (i.e.
@@ -468,6 +509,19 @@ __unwind_incomplete_requests(struct intel_engine_cs *engine)
list_move(&rq->sched.link, pl);
active = rq;
} else {
+ /*
+ * Decouple the virtual breadcrumb before moving it
+ * back to the virtual engine -- we don't want the
+ * request to complete in the background and try
+ * and cancel the breadcrumb on the virtual engine
+ * (instead of the old engine where it is linked)!
+ */
+ if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
+ &rq->fence.flags)) {
+ spin_lock(&rq->lock);
+ i915_request_cancel_breadcrumb(rq);
+ spin_unlock(&rq->lock);
+ }
rq->engine = owner;
owner->submit_request(rq);
active = NULL;
@@ -500,32 +554,45 @@ execlists_context_status_change(struct i915_request *rq, unsigned long status)
status, rq);
}
-inline void
-execlists_user_begin(struct intel_engine_execlists *execlists,
- const struct execlist_port *port)
+static inline struct intel_engine_cs *
+__execlists_schedule_in(struct i915_request *rq)
{
- execlists_set_active_once(execlists, EXECLISTS_ACTIVE_USER);
-}
+ struct intel_engine_cs * const engine = rq->engine;
+ struct intel_context * const ce = rq->hw_context;
-inline void
-execlists_user_end(struct intel_engine_execlists *execlists)
-{
- execlists_clear_active(execlists, EXECLISTS_ACTIVE_USER);
+ intel_context_get(ce);
+
+ intel_gt_pm_get(engine->gt);
+ execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_IN);
+ intel_engine_context_in(engine);
+
+ return engine;
}
-static inline void
-execlists_context_schedule_in(struct i915_request *rq)
+static inline struct i915_request *
+execlists_schedule_in(struct i915_request *rq, int idx)
{
- GEM_BUG_ON(rq->hw_context->inflight);
+ struct intel_context * const ce = rq->hw_context;
+ struct intel_engine_cs *old;
- execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_IN);
- intel_engine_context_in(rq->engine);
- rq->hw_context->inflight = rq->engine;
+ GEM_BUG_ON(!intel_engine_pm_is_awake(rq->engine));
+ trace_i915_request_in(rq, idx);
+
+ old = READ_ONCE(ce->inflight);
+ do {
+ if (!old) {
+ WRITE_ONCE(ce->inflight, __execlists_schedule_in(rq));
+ break;
+ }
+ } while (!try_cmpxchg(&ce->inflight, &old, ptr_inc(old)));
+
+ GEM_BUG_ON(intel_context_inflight(ce) != rq->engine);
+ return i915_request_get(rq);
}
-static void kick_siblings(struct i915_request *rq)
+static void kick_siblings(struct i915_request *rq, struct intel_context *ce)
{
- struct virtual_engine *ve = to_virtual_engine(rq->hw_context->engine);
+ struct virtual_engine *ve = container_of(ce, typeof(*ve), context);
struct i915_request *next = READ_ONCE(ve->request);
if (next && next->execution_mask & ~rq->execution_mask)
@@ -533,29 +600,53 @@ static void kick_siblings(struct i915_request *rq)
}
static inline void
-execlists_context_schedule_out(struct i915_request *rq, unsigned long status)
+__execlists_schedule_out(struct i915_request *rq,
+ struct intel_engine_cs * const engine)
{
- rq->hw_context->inflight = NULL;
- intel_engine_context_out(rq->engine);
- execlists_context_status_change(rq, status);
- trace_i915_request_out(rq);
+ struct intel_context * const ce = rq->hw_context;
+
+ intel_engine_context_out(engine);
+ execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_OUT);
+ intel_gt_pm_put(engine->gt);
/*
- * If this is part of a virtual engine, its next request may have
- * been blocked waiting for access to the active context. We have
- * to kick all the siblings again in case we need to switch (e.g.
- * the next request is not runnable on this engine). Hopefully,
- * we will already have submitted the next request before the
- * tasklet runs and do not need to rebuild each virtual tree
- * and kick everyone again.
+ * If this is part of a virtual engine, its next request may
+ * have been blocked waiting for access to the active context.
+ * We have to kick all the siblings again in case we need to
+ * switch (e.g. the next request is not runnable on this
+ * engine). Hopefully, we will already have submitted the next
+ * request before the tasklet runs and do not need to rebuild
+ * each virtual tree and kick everyone again.
*/
- if (rq->engine != rq->hw_context->engine)
- kick_siblings(rq);
+ if (ce->engine != engine)
+ kick_siblings(rq, ce);
+
+ intel_context_put(ce);
+}
+
+static inline void
+execlists_schedule_out(struct i915_request *rq)
+{
+ struct intel_context * const ce = rq->hw_context;
+ struct intel_engine_cs *cur, *old;
+
+ trace_i915_request_out(rq);
+ GEM_BUG_ON(intel_context_inflight(ce) != rq->engine);
+
+ old = READ_ONCE(ce->inflight);
+ do
+ cur = ptr_unmask_bits(old, 2) ? ptr_dec(old) : NULL;
+ while (!try_cmpxchg(&ce->inflight, &old, cur));
+ if (!cur)
+ __execlists_schedule_out(rq, old);
+
+ i915_request_put(rq);
}
-static u64 execlists_update_context(struct i915_request *rq)
+static u64 execlists_update_context(const struct i915_request *rq)
{
struct intel_context *ce = rq->hw_context;
+ u64 desc;
ce->lrc_reg_state[CTX_RING_TAIL + 1] =
intel_ring_set_tail(rq->ring, rq->tail);
@@ -576,7 +667,11 @@ static u64 execlists_update_context(struct i915_request *rq)
* wmb).
*/
mb();
- return ce->lrc_desc;
+
+ desc = ce->lrc_desc;
+ ce->lrc_desc &= ~CTX_DESC_FORCE_RESTORE;
+
+ return desc;
}
static inline void write_desc(struct intel_engine_execlists *execlists, u64 desc, u32 port)
@@ -590,12 +685,65 @@ static inline void write_desc(struct intel_engine_execlists *execlists, u64 desc
}
}
+static __maybe_unused void
+trace_ports(const struct intel_engine_execlists *execlists,
+ const char *msg,
+ struct i915_request * const *ports)
+{
+ const struct intel_engine_cs *engine =
+ container_of(execlists, typeof(*engine), execlists);
+
+ GEM_TRACE("%s: %s { %llx:%lld%s, %llx:%lld }\n",
+ engine->name, msg,
+ ports[0]->fence.context,
+ ports[0]->fence.seqno,
+ i915_request_completed(ports[0]) ? "!" :
+ i915_request_started(ports[0]) ? "*" :
+ "",
+ ports[1] ? ports[1]->fence.context : 0,
+ ports[1] ? ports[1]->fence.seqno : 0);
+}
+
+static __maybe_unused bool
+assert_pending_valid(const struct intel_engine_execlists *execlists,
+ const char *msg)
+{
+ struct i915_request * const *port, *rq;
+ struct intel_context *ce = NULL;
+
+ trace_ports(execlists, msg, execlists->pending);
+
+ if (!execlists->pending[0])
+ return false;
+
+ if (execlists->pending[execlists_num_ports(execlists)])
+ return false;
+
+ for (port = execlists->pending; (rq = *port); port++) {
+ if (ce == rq->hw_context)
+ return false;
+
+ ce = rq->hw_context;
+ if (i915_request_completed(rq))
+ continue;
+
+ if (i915_active_is_idle(&ce->active))
+ return false;
+
+ if (!i915_vma_is_pinned(ce->state))
+ return false;
+ }
+
+ return ce;
+}
+
static void execlists_submit_ports(struct intel_engine_cs *engine)
{
struct intel_engine_execlists *execlists = &engine->execlists;
- struct execlist_port *port = execlists->port;
unsigned int n;
+ GEM_BUG_ON(!assert_pending_valid(execlists, "submit"));
+
/*
* We can skip acquiring intel_runtime_pm_get() here as it was taken
* on our behalf by the request (see i915_gem_mark_busy()) and it will
@@ -604,7 +752,7 @@ static void execlists_submit_ports(struct intel_engine_cs *engine)
* that all ELSP are drained i.e. we have processed the CSB,
* before allowing ourselves to idle and calling intel_runtime_pm_put().
*/
- GEM_BUG_ON(!intel_wakeref_active(&engine->wakeref));
+ GEM_BUG_ON(!intel_engine_pm_is_awake(engine));
/*
* ELSQ note: the submit queue is not cleared after being submitted
@@ -613,38 +761,16 @@ static void execlists_submit_ports(struct intel_engine_cs *engine)
* of elsq entries, keep this in mind before changing the loop below.
*/
for (n = execlists_num_ports(execlists); n--; ) {
- struct i915_request *rq;
- unsigned int count;
- u64 desc;
-
- rq = port_unpack(&port[n], &count);
- if (rq) {
- GEM_BUG_ON(count > !n);
- if (!count++)
- execlists_context_schedule_in(rq);
- port_set(&port[n], port_pack(rq, count));
- desc = execlists_update_context(rq);
- GEM_DEBUG_EXEC(port[n].context_id = upper_32_bits(desc));
-
- GEM_TRACE("%s in[%d]: ctx=%d.%d, fence %llx:%lld (current %d), prio=%d\n",
- engine->name, n,
- port[n].context_id, count,
- rq->fence.context, rq->fence.seqno,
- hwsp_seqno(rq),
- rq_prio(rq));
- } else {
- GEM_BUG_ON(!n);
- desc = 0;
- }
+ struct i915_request *rq = execlists->pending[n];
- write_desc(execlists, desc, n);
+ write_desc(execlists,
+ rq ? execlists_update_context(rq) : 0,
+ n);
}
/* we need to manually load the submit queue */
if (execlists->ctrl_reg)
writel(EL_CTRL_LOAD, execlists->ctrl_reg);
-
- execlists_clear_active(execlists, EXECLISTS_ACTIVE_HWACK);
}
static bool ctx_single_port_submission(const struct intel_context *ce)
@@ -668,6 +794,7 @@ static bool can_merge_ctx(const struct intel_context *prev,
static bool can_merge_rq(const struct i915_request *prev,
const struct i915_request *next)
{
+ GEM_BUG_ON(prev == next);
GEM_BUG_ON(!assert_priority_queue(prev, next));
if (!can_merge_ctx(prev->hw_context, next->hw_context))
@@ -676,58 +803,6 @@ static bool can_merge_rq(const struct i915_request *prev,
return true;
}
-static void port_assign(struct execlist_port *port, struct i915_request *rq)
-{
- GEM_BUG_ON(rq == port_request(port));
-
- if (port_isset(port))
- i915_request_put(port_request(port));
-
- port_set(port, port_pack(i915_request_get(rq), port_count(port)));
-}
-
-static void inject_preempt_context(struct intel_engine_cs *engine)
-{
- struct intel_engine_execlists *execlists = &engine->execlists;
- struct intel_context *ce = engine->preempt_context;
- unsigned int n;
-
- GEM_BUG_ON(execlists->preempt_complete_status !=
- upper_32_bits(ce->lrc_desc));
-
- /*
- * Switch to our empty preempt context so
- * the state of the GPU is known (idle).
- */
- GEM_TRACE("%s\n", engine->name);
- for (n = execlists_num_ports(execlists); --n; )
- write_desc(execlists, 0, n);
-
- write_desc(execlists, ce->lrc_desc, n);
-
- /* we need to manually load the submit queue */
- if (execlists->ctrl_reg)
- writel(EL_CTRL_LOAD, execlists->ctrl_reg);
-
- execlists_clear_active(execlists, EXECLISTS_ACTIVE_HWACK);
- execlists_set_active(execlists, EXECLISTS_ACTIVE_PREEMPT);
-
- (void)I915_SELFTEST_ONLY(execlists->preempt_hang.count++);
-}
-
-static void complete_preempt_context(struct intel_engine_execlists *execlists)
-{
- GEM_BUG_ON(!execlists_is_active(execlists, EXECLISTS_ACTIVE_PREEMPT));
-
- if (inject_preempt_hang(execlists))
- return;
-
- execlists_cancel_port_requests(execlists);
- __unwind_incomplete_requests(container_of(execlists,
- struct intel_engine_cs,
- execlists));
-}
-
static void virtual_update_register_offsets(u32 *regs,
struct intel_engine_cs *engine)
{
@@ -792,7 +867,7 @@ static bool virtual_matches(const struct virtual_engine *ve,
* we reuse the register offsets). This is a very small
* hysteresis on the greedy selection algorithm.
*/
- inflight = READ_ONCE(ve->context.inflight);
+ inflight = intel_context_inflight(&ve->context);
if (inflight && inflight != engine)
return false;
@@ -815,13 +890,120 @@ static void virtual_xfer_breadcrumbs(struct virtual_engine *ve,
spin_unlock(&old->breadcrumbs.irq_lock);
}
+static struct i915_request *
+last_active(const struct intel_engine_execlists *execlists)
+{
+ struct i915_request * const *last = execlists->active;
+
+ while (*last && i915_request_completed(*last))
+ last++;
+
+ return *last;
+}
+
+static void defer_request(struct i915_request *rq, struct list_head * const pl)
+{
+ LIST_HEAD(list);
+
+ /*
+ * We want to move the interrupted request to the back of
+ * the round-robin list (i.e. its priority level), but
+ * in doing so, we must then move all requests that were in
+ * flight and were waiting for the interrupted request to
+ * be run after it again.
+ */
+ do {
+ struct i915_dependency *p;
+
+ GEM_BUG_ON(i915_request_is_active(rq));
+ list_move_tail(&rq->sched.link, pl);
+
+ list_for_each_entry(p, &rq->sched.waiters_list, wait_link) {
+ struct i915_request *w =
+ container_of(p->waiter, typeof(*w), sched);
+
+ /* Leave semaphores spinning on the other engines */
+ if (w->engine != rq->engine)
+ continue;
+
+ /* No waiter should start before its signaler */
+ GEM_BUG_ON(i915_request_started(w) &&
+ !i915_request_completed(rq));
+
+ GEM_BUG_ON(i915_request_is_active(w));
+ if (list_empty(&w->sched.link))
+ continue; /* Not yet submitted; unready */
+
+ if (rq_prio(w) < rq_prio(rq))
+ continue;
+
+ GEM_BUG_ON(rq_prio(w) > rq_prio(rq));
+ list_move_tail(&w->sched.link, &list);
+ }
+
+ rq = list_first_entry_or_null(&list, typeof(*rq), sched.link);
+ } while (rq);
+}
+
+static void defer_active(struct intel_engine_cs *engine)
+{
+ struct i915_request *rq;
+
+ rq = __unwind_incomplete_requests(engine);
+ if (!rq)
+ return;
+
+ defer_request(rq, i915_sched_lookup_priolist(engine, rq_prio(rq)));
+}
+
+static bool
+need_timeslice(struct intel_engine_cs *engine, const struct i915_request *rq)
+{
+ int hint;
+
+ if (!intel_engine_has_semaphores(engine))
+ return false;
+
+ if (list_is_last(&rq->sched.link, &engine->active.requests))
+ return false;
+
+ hint = max(rq_prio(list_next_entry(rq, sched.link)),
+ engine->execlists.queue_priority_hint);
+
+ return hint >= effective_prio(rq);
+}
+
+static int
+switch_prio(struct intel_engine_cs *engine, const struct i915_request *rq)
+{
+ if (list_is_last(&rq->sched.link, &engine->active.requests))
+ return INT_MIN;
+
+ return rq_prio(list_next_entry(rq, sched.link));
+}
+
+static bool
+enable_timeslice(const struct intel_engine_execlists *execlists)
+{
+ const struct i915_request *rq = *execlists->active;
+
+ if (i915_request_completed(rq))
+ return false;
+
+ return execlists->switch_priority_hint >= effective_prio(rq);
+}
+
+static void record_preemption(struct intel_engine_execlists *execlists)
+{
+ (void)I915_SELFTEST_ONLY(execlists->preempt_hang.count++);
+}
+
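Taken together, need_timeslice()/switch_prio()/enable_timeslice() implement round-robin between equal-priority contexts; a worked example under assumed values:

	/*
	 * Contexts A (executing) and B, both at priority 0:
	 *   On promotion, enable_timeslice() sees switch_priority_hint == 0 >=
	 *   effective_prio(A) == 0 and arms execlists->timer for the next jiffy.
	 *   The timer kicks the tasklet; in execlists_dequeue() the timer is no
	 *   longer pending, need_timeslice(engine, A) is true (hint 0 >= 0), so
	 *   the ring is paused and defer_active() rotates A behind B at prio 0.
	 */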
static void execlists_dequeue(struct intel_engine_cs *engine)
{
struct intel_engine_execlists * const execlists = &engine->execlists;
- struct execlist_port *port = execlists->port;
- const struct execlist_port * const last_port =
- &execlists->port[execlists->port_mask];
- struct i915_request *last = port_request(port);
+ struct i915_request **port = execlists->pending;
+ struct i915_request ** const last_port = port + execlists->port_mask;
+ struct i915_request *last;
struct rb_node *rb;
bool submit = false;
@@ -867,65 +1049,100 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
break;
}
+ /*
+ * If the queue is higher priority than the last
+ * request in the currently active context, submit afresh.
+ * We will resubmit again afterwards in case we need to split
+ * the active context to interject the preemption request,
+ * i.e. we will retrigger preemption following the ack in case
+ * of trouble.
+ */
+ last = last_active(execlists);
if (last) {
- /*
- * Don't resubmit or switch until all outstanding
- * preemptions (lite-restore) are seen. Then we
- * know the next preemption status we see corresponds
- * to this ELSP update.
- */
- GEM_BUG_ON(!execlists_is_active(execlists,
- EXECLISTS_ACTIVE_USER));
- GEM_BUG_ON(!port_count(&port[0]));
+ if (need_preempt(engine, last, rb)) {
+ GEM_TRACE("%s: preempting last=%llx:%lld, prio=%d, hint=%d\n",
+ engine->name,
+ last->fence.context,
+ last->fence.seqno,
+ last->sched.attr.priority,
+ execlists->queue_priority_hint);
+ record_preemption(execlists);
- /*
- * If we write to ELSP a second time before the HW has had
- * a chance to respond to the previous write, we can confuse
- * the HW and hit "undefined behaviour". After writing to ELSP,
- * we must then wait until we see a context-switch event from
- * the HW to indicate that it has had a chance to respond.
- */
- if (!execlists_is_active(execlists, EXECLISTS_ACTIVE_HWACK))
- return;
+ /*
+ * Don't let the RING_HEAD advance past the breadcrumb
+ * as we unwind (and until we resubmit) so that we do
+ * not accidentally tell it to go backwards.
+ */
+ ring_set_paused(engine, 1);
- if (need_preempt(engine, last, rb)) {
- inject_preempt_context(engine);
- return;
- }
+ /*
+ * Note that we have not stopped the GPU at this point,
+ * so we are unwinding the incomplete requests as they
+ * remain inflight and so by the time we do complete
+ * the preemption, some of the unwound requests may
+ * complete!
+ */
+ __unwind_incomplete_requests(engine);
- /*
- * In theory, we could coalesce more requests onto
- * the second port (the first port is active, with
- * no preemptions pending). However, that means we
- * then have to deal with the possible lite-restore
- * of the second port (as we submit the ELSP, there
- * may be a context-switch) but also we may complete
- * the resubmission before the context-switch. Ergo,
- * coalescing onto the second port will cause a
- * preemption event, but we cannot predict whether
- * that will affect port[0] or port[1].
- *
- * If the second port is already active, we can wait
- * until the next context-switch before contemplating
- * new requests. The GPU will be busy and we should be
- * able to resubmit the new ELSP before it idles,
- * avoiding pipeline bubbles (momentary pauses where
- * the driver is unable to keep up the supply of new
- * work). However, we have to double check that the
- * priorities of the ports haven't been switch.
- */
- if (port_count(&port[1]))
- return;
+ /*
+ * If we need to return to the preempted context, we
+ * need to skip the lite-restore and force it to
+ * reload the RING_TAIL. Otherwise, the HW has a
+ * tendency to ignore us rewinding the TAIL to the
+ * end of an earlier request.
+ */
+ last->hw_context->lrc_desc |= CTX_DESC_FORCE_RESTORE;
+ last = NULL;
+ } else if (need_timeslice(engine, last) &&
+ !timer_pending(&engine->execlists.timer)) {
+ GEM_TRACE("%s: expired last=%llx:%lld, prio=%d, hint=%d\n",
+ engine->name,
+ last->fence.context,
+ last->fence.seqno,
+ last->sched.attr.priority,
+ execlists->queue_priority_hint);
- /*
- * WaIdleLiteRestore:bdw,skl
- * Apply the wa NOOPs to prevent
- * ring:HEAD == rq:TAIL as we resubmit the
- * request. See gen8_emit_fini_breadcrumb() for
- * where we prepare the padding after the
- * end of the request.
- */
- last->tail = last->wa_tail;
+ ring_set_paused(engine, 1);
+ defer_active(engine);
+
+ /*
+ * Unlike for preemption, if we rewind and continue
+ * executing the same context as previously active,
+ * the order of execution will remain the same and
+ * the tail will only advance. We do not need to
+ * force a full context restore, as a lite-restore
+ * is sufficient to resample the monotonic TAIL.
+ *
+ * If we switch to any other context, similarly we
+ * will not rewind TAIL of current context, and
+ * normal save/restore will preserve state and allow
+ * us to later continue executing the same request.
+ */
+ last = NULL;
+ } else {
+ /*
+ * Otherwise if we already have a request pending
+ * for execution after the current one, we can
+ * just wait until the next CS event before
+ * queuing more. In either case we will force a
+ * lite-restore preemption event, but if we wait
+ * we hopefully coalesce several updates into a single
+ * submission.
+ */
+ if (!list_is_last(&last->sched.link,
+ &engine->active.requests))
+ return;
+
+ /*
+ * WaIdleLiteRestore:bdw,skl
+ * Apply the wa NOOPs to prevent
+ * ring:HEAD == rq:TAIL as we resubmit the
+ * request. See gen8_emit_fini_breadcrumb() for
+ * where we prepare the padding after the
+ * end of the request.
+ */
+ last->tail = last->wa_tail;
+ }
}
while (rb) { /* XXX virtual is always taking precedence */
@@ -955,9 +1172,24 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
continue;
}
+ if (i915_request_completed(rq)) {
+ ve->request = NULL;
+ ve->base.execlists.queue_priority_hint = INT_MIN;
+ rb_erase_cached(rb, &execlists->virtual);
+ RB_CLEAR_NODE(rb);
+
+ rq->engine = engine;
+ __i915_request_submit(rq);
+
+ spin_unlock(&ve->base.active.lock);
+
+ rb = rb_first_cached(&execlists->virtual);
+ continue;
+ }
+
if (last && !can_merge_rq(last, rq)) {
spin_unlock(&ve->base.active.lock);
- return; /* leave this rq for another engine */
+ return; /* leave this for another */
}
GEM_TRACE("%s: virtual rq=%llx:%lld%s, new engine? %s\n",
@@ -1006,9 +1238,10 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
}
__i915_request_submit(rq);
- trace_i915_request_in(rq, port_index(port, execlists));
- submit = true;
- last = rq;
+ if (!i915_request_completed(rq)) {
+ submit = true;
+ last = rq;
+ }
}
spin_unlock(&ve->base.active.lock);
@@ -1021,6 +1254,9 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
int i;
priolist_for_each_request_consume(rq, rn, p, i) {
+ if (i915_request_completed(rq))
+ goto skip;
+
/*
* Can we combine this request with the current port?
* It has to be the same context/ringbuffer and not
@@ -1060,19 +1296,14 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
ctx_single_port_submission(rq->hw_context))
goto done;
-
- if (submit)
- port_assign(port, last);
+ *port = execlists_schedule_in(last, port - execlists->pending);
port++;
-
- GEM_BUG_ON(port_isset(port));
}
- __i915_request_submit(rq);
- trace_i915_request_in(rq, port_index(port, execlists));
-
last = rq;
submit = true;
+skip:
+ __i915_request_submit(rq);
}
rb_erase_cached(&p->node, &execlists->queue);
@@ -1097,54 +1328,34 @@ done:
* interrupt for secondary ports).
*/
execlists->queue_priority_hint = queue_prio(execlists);
+ GEM_TRACE("%s: queue_priority_hint:%d, submit:%s\n",
+ engine->name, execlists->queue_priority_hint,
+ yesno(submit));
if (submit) {
- port_assign(port, last);
+ *port = execlists_schedule_in(last, port - execlists->pending);
+ memset(port + 1, 0, (last_port - port) * sizeof(*port));
+ execlists->switch_priority_hint =
+ switch_prio(engine, *execlists->pending);
execlists_submit_ports(engine);
+ } else {
+ ring_set_paused(engine, 0);
}
-
- /* We must always keep the beast fed if we have work piled up */
- GEM_BUG_ON(rb_first_cached(&execlists->queue) &&
- !port_isset(execlists->port));
-
- /* Re-evaluate the executing context setup after each preemptive kick */
- if (last)
- execlists_user_begin(execlists, execlists->port);
-
- /* If the engine is now idle, so should be the flag; and vice versa. */
- GEM_BUG_ON(execlists_is_active(&engine->execlists,
- EXECLISTS_ACTIVE_USER) ==
- !port_isset(engine->execlists.port));
}
-void
-execlists_cancel_port_requests(struct intel_engine_execlists * const execlists)
+static void
+cancel_port_requests(struct intel_engine_execlists * const execlists)
{
- struct execlist_port *port = execlists->port;
- unsigned int num_ports = execlists_num_ports(execlists);
-
- while (num_ports-- && port_isset(port)) {
- struct i915_request *rq = port_request(port);
-
- GEM_TRACE("%s:port%u fence %llx:%lld, (current %d)\n",
- rq->engine->name,
- (unsigned int)(port - execlists->port),
- rq->fence.context, rq->fence.seqno,
- hwsp_seqno(rq));
-
- GEM_BUG_ON(!execlists->active);
- execlists_context_schedule_out(rq,
- i915_request_completed(rq) ?
- INTEL_CONTEXT_SCHEDULE_OUT :
- INTEL_CONTEXT_SCHEDULE_PREEMPTED);
+ struct i915_request * const *port, *rq;
- i915_request_put(rq);
+ for (port = execlists->pending; (rq = *port); port++)
+ execlists_schedule_out(rq);
+ memset(execlists->pending, 0, sizeof(execlists->pending));
- memset(port, 0, sizeof(*port));
- port++;
- }
-
- execlists_clear_all_active(execlists);
+ for (port = execlists->active; (rq = *port); port++)
+ execlists_schedule_out(rq);
+ execlists->active =
+ memset(execlists->inflight, 0, sizeof(execlists->inflight));
}
static inline void
@@ -1160,15 +1371,100 @@ reset_in_progress(const struct intel_engine_execlists *execlists)
return unlikely(!__tasklet_is_enabled(&execlists->tasklet));
}
+enum csb_step {
+ CSB_NOP,
+ CSB_PROMOTE,
+ CSB_PREEMPT,
+ CSB_COMPLETE,
+};
+
+/*
+ * Starting with Gen12, the status has a new format:
+ *
+ * bit 0: switched to new queue
+ * bit 1: reserved
+ * bit 2: semaphore wait mode (poll or signal), only valid when
+ * switch detail is set to "wait on semaphore"
+ * bits 3-5: engine class
+ * bits 6-11: engine instance
+ * bits 12-14: reserved
+ * bits 15-25: sw context id of the lrc the GT switched to
+ * bits 26-31: sw counter of the lrc the GT switched to
+ * bits 32-35: context switch detail
+ * - 0: ctx complete
+ * - 1: wait on sync flip
+ * - 2: wait on vblank
+ * - 3: wait on scanline
+ * - 4: wait on semaphore
+ * - 5: context preempted (not on SEMAPHORE_WAIT or
+ * WAIT_FOR_EVENT)
+ * bit 36: reserved
+ * bits 37-43: wait detail (for switch detail 1 to 4)
+ * bits 44-46: reserved
+ * bits 47-57: sw context id of the lrc the GT switched away from
+ * bits 58-63: sw counter of the lrc the GT switched away from
+ */
+static inline enum csb_step
+gen12_csb_parse(const struct intel_engine_execlists *execlists, const u32 *csb)
+{
+ u32 lower_dw = csb[0];
+ u32 upper_dw = csb[1];
+ bool ctx_to_valid = GEN12_CSB_CTX_VALID(lower_dw);
+ bool ctx_away_valid = GEN12_CSB_CTX_VALID(upper_dw);
+ bool new_queue = lower_dw & GEN12_CTX_STATUS_SWITCHED_TO_NEW_QUEUE;
+
+ if (!ctx_away_valid && ctx_to_valid)
+ return CSB_PROMOTE;
+
+ /*
+ * The context switch detail is not guaranteed to be 5 when a preemption
+ * occurs, so we can't just check for that. The check below works for
+ * all the cases we care about, including preemptions of WAIT
+ * instructions and lite-restore. Preempt-to-idle via the CTRL register
+ * would require some extra handling, but we don't support that.
+ */
+ if (new_queue && ctx_away_valid)
+ return CSB_PREEMPT;
+
+ /*
+ * switch detail = 5 is covered by the case above and we do not expect a
+ * context switch on an unsuccessful wait instruction since we always
+ * use polling mode.
+ */
+ GEM_BUG_ON(GEN12_CTX_SWITCH_DETAIL(upper_dw));
+
+ if (*execlists->active) {
+ GEM_BUG_ON(!ctx_away_valid);
+ return CSB_COMPLETE;
+ }
+
+ return CSB_NOP;
+}
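To make the decode above concrete, a few synthetic CSB events and how gen12_csb_parse() classifies them (the sw context id occupies bits 15-25 of each dword, with 0x7ff meaning idle):

	/*
	 *   promote:  csb[0] = 0x00008001 (to-id 1 valid, new-queue set)
	 *             csb[1] = 0x03ff8000 (away-id 0x7ff -> invalid)
	 *                                  -> CSB_PROMOTE
	 *   preempt:  csb[0] = 0x00010001 (to-id 2 valid, new-queue set)
	 *             csb[1] = 0x00008000 (away-id 1 valid, detail 0)
	 *                                  -> CSB_PREEMPT
	 *   complete: csb[0] = 0x03ff8000 (to-id idle)
	 *             csb[1] = 0x00008000 (away-id 1 valid, detail 0)
	 *                                  -> CSB_COMPLETE while *active is set
	 */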
+
+static inline enum csb_step
+gen8_csb_parse(const struct intel_engine_execlists *execlists, const u32 *csb)
+{
+ unsigned int status = *csb;
+
+ if (status & GEN8_CTX_STATUS_IDLE_ACTIVE)
+ return CSB_PROMOTE;
+
+ if (status & GEN8_CTX_STATUS_PREEMPTED)
+ return CSB_PREEMPT;
+
+ if (*execlists->active)
+ return CSB_COMPLETE;
+
+ return CSB_NOP;
+}
+
static void process_csb(struct intel_engine_cs *engine)
{
struct intel_engine_execlists * const execlists = &engine->execlists;
- struct execlist_port *port = execlists->port;
const u32 * const buf = execlists->csb_status;
const u8 num_entries = execlists->csb_size;
u8 head, tail;
- lockdep_assert_held(&engine->active.lock);
GEM_BUG_ON(USES_GUC_SUBMISSION(engine->i915));
/*
@@ -1198,9 +1494,7 @@ static void process_csb(struct intel_engine_cs *engine)
rmb();
do {
- struct i915_request *rq;
- unsigned int status;
- unsigned int count;
+ enum csb_step csb_step;
if (++head == num_entries)
head = 0;
@@ -1223,68 +1517,43 @@ static void process_csb(struct intel_engine_cs *engine)
* status notifier.
*/
- GEM_TRACE("%s csb[%d]: status=0x%08x:0x%08x, active=0x%x\n",
+ GEM_TRACE("%s csb[%d]: status=0x%08x:0x%08x\n",
engine->name, head,
- buf[2 * head + 0], buf[2 * head + 1],
- execlists->active);
-
- status = buf[2 * head];
- if (status & (GEN8_CTX_STATUS_IDLE_ACTIVE |
- GEN8_CTX_STATUS_PREEMPTED))
- execlists_set_active(execlists,
- EXECLISTS_ACTIVE_HWACK);
- if (status & GEN8_CTX_STATUS_ACTIVE_IDLE)
- execlists_clear_active(execlists,
- EXECLISTS_ACTIVE_HWACK);
-
- if (!(status & GEN8_CTX_STATUS_COMPLETED_MASK))
- continue;
+ buf[2 * head + 0], buf[2 * head + 1]);
- /* We should never get a COMPLETED | IDLE_ACTIVE! */
- GEM_BUG_ON(status & GEN8_CTX_STATUS_IDLE_ACTIVE);
+ if (INTEL_GEN(engine->i915) >= 12)
+ csb_step = gen12_csb_parse(execlists, buf + 2 * head);
+ else
+ csb_step = gen8_csb_parse(execlists, buf + 2 * head);
- if (status & GEN8_CTX_STATUS_COMPLETE &&
- buf[2*head + 1] == execlists->preempt_complete_status) {
- GEM_TRACE("%s preempt-idle\n", engine->name);
- complete_preempt_context(execlists);
- continue;
- }
+ switch (csb_step) {
+ case CSB_PREEMPT: /* cancel old inflight, prepare for switch */
+ trace_ports(execlists, "preempted", execlists->active);
- if (status & GEN8_CTX_STATUS_PREEMPTED &&
- execlists_is_active(execlists,
- EXECLISTS_ACTIVE_PREEMPT))
- continue;
+ while (*execlists->active)
+ execlists_schedule_out(*execlists->active++);
- GEM_BUG_ON(!execlists_is_active(execlists,
- EXECLISTS_ACTIVE_USER));
+ /* fallthrough */
+ case CSB_PROMOTE: /* switch pending to inflight */
+ GEM_BUG_ON(*execlists->active);
+ GEM_BUG_ON(!assert_pending_valid(execlists, "promote"));
+ execlists->active =
+ memcpy(execlists->inflight,
+ execlists->pending,
+ execlists_num_ports(execlists) *
+ sizeof(*execlists->pending));
- rq = port_unpack(port, &count);
- GEM_TRACE("%s out[0]: ctx=%d.%d, fence %llx:%lld (current %d), prio=%d\n",
- engine->name,
- port->context_id, count,
- rq ? rq->fence.context : 0,
- rq ? rq->fence.seqno : 0,
- rq ? hwsp_seqno(rq) : 0,
- rq ? rq_prio(rq) : 0);
+ if (enable_timeslice(execlists))
+ mod_timer(&execlists->timer, jiffies + 1);
- /* Check the context/desc id for this event matches */
- GEM_DEBUG_BUG_ON(buf[2 * head + 1] != port->context_id);
+ if (!inject_preempt_hang(execlists))
+ ring_set_paused(engine, 0);
- GEM_BUG_ON(count == 0);
- if (--count == 0) {
- /*
- * On the final event corresponding to the
- * submission of this context, we expect either
- * an element-switch event or a completion
- * event (and on completion, the active-idle
- * marker). No more preemptions, lite-restore
- * or otherwise.
- */
- GEM_BUG_ON(status & GEN8_CTX_STATUS_PREEMPTED);
- GEM_BUG_ON(port_isset(&port[1]) &&
- !(status & GEN8_CTX_STATUS_ELEMENT_SWITCH));
- GEM_BUG_ON(!port_isset(&port[1]) &&
- !(status & GEN8_CTX_STATUS_ACTIVE_IDLE));
+ WRITE_ONCE(execlists->pending[0], NULL);
+ break;
+
+ case CSB_COMPLETE: /* port0 completed, advanced to port1 */
+ trace_ports(execlists, "completed", execlists->active);
/*
* We rely on the hardware being strongly
@@ -1292,22 +1561,16 @@ static void process_csb(struct intel_engine_cs *engine)
* coherent (visible from the CPU) before the
* user interrupt and CSB is processed.
*/
- GEM_BUG_ON(!i915_request_completed(rq));
-
- execlists_context_schedule_out(rq,
- INTEL_CONTEXT_SCHEDULE_OUT);
- i915_request_put(rq);
+ GEM_BUG_ON(!i915_request_completed(*execlists->active) &&
+ !reset_in_progress(execlists));
+ execlists_schedule_out(*execlists->active++);
- GEM_TRACE("%s completed ctx=%d\n",
- engine->name, port->context_id);
+ GEM_BUG_ON(execlists->active - execlists->inflight >
+ execlists_num_ports(execlists));
+ break;
- port = execlists_port_complete(execlists, port);
- if (port_isset(port))
- execlists_user_begin(execlists, port);
- else
- execlists_user_end(execlists);
- } else {
- port_set(port, port_pack(rq, count));
+ case CSB_NOP:
+ break;
}
} while (head != tail);
@@ -1330,9 +1593,7 @@ static void process_csb(struct intel_engine_cs *engine)
static void __execlists_submission_tasklet(struct intel_engine_cs *const engine)
{
lockdep_assert_held(&engine->active.lock);
-
- process_csb(engine);
- if (!execlists_is_active(&engine->execlists, EXECLISTS_ACTIVE_PREEMPT))
+ if (!engine->execlists.pending[0])
execlists_dequeue(engine);
}
@@ -1345,14 +1606,21 @@ static void execlists_submission_tasklet(unsigned long data)
struct intel_engine_cs * const engine = (struct intel_engine_cs *)data;
unsigned long flags;
- GEM_TRACE("%s awake?=%d, active=%x\n",
- engine->name,
- !!intel_wakeref_active(&engine->wakeref),
- engine->execlists.active);
+ process_csb(engine);
+ if (!READ_ONCE(engine->execlists.pending[0])) {
+ spin_lock_irqsave(&engine->active.lock, flags);
+ __execlists_submission_tasklet(engine);
+ spin_unlock_irqrestore(&engine->active.lock, flags);
+ }
+}
- spin_lock_irqsave(&engine->active.lock, flags);
- __execlists_submission_tasklet(engine);
- spin_unlock_irqrestore(&engine->active.lock, flags);
+static void execlists_submission_timer(struct timer_list *timer)
+{
+ struct intel_engine_cs *engine =
+ from_timer(engine, timer, execlists.timer);
+
+ /* Kick the tasklet for some interrupt coalescing and reset handling */
+ tasklet_hi_schedule(&engine->execlists.tasklet);
}
static void queue_request(struct intel_engine_cs *engine,
@@ -1376,12 +1644,16 @@ static void __submit_queue_imm(struct intel_engine_cs *engine)
tasklet_hi_schedule(&execlists->tasklet);
}
-static void submit_queue(struct intel_engine_cs *engine, int prio)
+static void submit_queue(struct intel_engine_cs *engine,
+ const struct i915_request *rq)
{
- if (prio > engine->execlists.queue_priority_hint) {
- engine->execlists.queue_priority_hint = prio;
- __submit_queue_imm(engine);
- }
+ struct intel_engine_execlists *execlists = &engine->execlists;
+
+ if (rq_prio(rq) <= execlists->queue_priority_hint)
+ return;
+
+ execlists->queue_priority_hint = rq_prio(rq);
+ __submit_queue_imm(engine);
}
static void execlists_submit_request(struct i915_request *request)
@@ -1397,7 +1669,7 @@ static void execlists_submit_request(struct i915_request *request)
GEM_BUG_ON(RB_EMPTY_ROOT(&engine->execlists.queue.rb_root));
GEM_BUG_ON(list_empty(&request->sched.link));
- submit_queue(engine, rq_prio(request));
+ submit_queue(engine, request);
spin_unlock_irqrestore(&engine->active.lock, flags);
}
@@ -1405,9 +1677,7 @@ static void execlists_submit_request(struct i915_request *request)
static void __execlists_context_fini(struct intel_context *ce)
{
intel_ring_put(ce->ring);
-
- GEM_BUG_ON(i915_gem_object_is_active(ce->state->obj));
- i915_gem_object_put(ce->state->obj);
+ i915_vma_put(ce->state);
}
static void execlists_context_destroy(struct kref *kref)
@@ -1420,13 +1690,45 @@ static void execlists_context_destroy(struct kref *kref)
if (ce->state)
__execlists_context_fini(ce);
+ intel_context_fini(ce);
intel_context_free(ce);
}
+static void
+set_redzone(void *vaddr, const struct intel_engine_cs *engine)
+{
+ if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
+ return;
+
+ vaddr += LRC_HEADER_PAGES * PAGE_SIZE;
+ vaddr += engine->context_size;
+
+ memset(vaddr, POISON_INUSE, I915_GTT_PAGE_SIZE);
+}
+
+static void
+check_redzone(const void *vaddr, const struct intel_engine_cs *engine)
+{
+ if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
+ return;
+
+ vaddr += LRC_HEADER_PAGES * PAGE_SIZE;
+ vaddr += engine->context_size;
+
+ if (memchr_inv(vaddr, POISON_INUSE, I915_GTT_PAGE_SIZE))
+ dev_err_once(engine->i915->drm.dev,
+ "%s context redzone overwritten!\n",
+ engine->name);
+}
+
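The redzone pair above is a use-after-size guard on the context image; a sketch of the invariant, assuming POISON_INUSE is the kernel's usual 0x5a fill byte:

	/*
	 * set:   memset(image + context_size, 0x5a, I915_GTT_PAGE_SIZE)
	 * check: memchr_inv(image + context_size, 0x5a, I915_GTT_PAGE_SIZE)
	 *        must return NULL at unpin; any byte no longer reading 0x5a
	 *        means something (HW or SW) wrote past engine->context_size.
	 */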
static void execlists_context_unpin(struct intel_context *ce)
{
+ check_redzone((void *)ce->lrc_reg_state - LRC_STATE_PN * PAGE_SIZE,
+ ce->engine);
+
i915_gem_context_unpin_hw_id(ce->gem_context);
i915_gem_object_unpin_map(ce->state->obj);
+ intel_ring_reset(ce->ring, ce->ring->tail);
}
static void
@@ -1444,9 +1746,12 @@ __execlists_update_reg_state(struct intel_context *ce,
regs[CTX_RING_TAIL + 1] = ring->tail;
/* RPCS */
- if (engine->class == RENDER_CLASS)
+ if (engine->class == RENDER_CLASS) {
regs[CTX_R_PWR_CLK_STATE + 1] =
intel_sseu_make_rpcs(engine->i915, &ce->sseu);
+
+ i915_oa_init_reg_state(engine, ce, regs);
+ }
}
static int
@@ -1456,19 +1761,12 @@ __execlists_context_pin(struct intel_context *ce,
void *vaddr;
int ret;
- GEM_BUG_ON(!ce->gem_context->vm);
-
- ret = execlists_context_deferred_alloc(ce, engine);
- if (ret)
- goto err;
GEM_BUG_ON(!ce->state);
- ret = intel_context_active_acquire(ce,
- engine->i915->ggtt.pin_bias |
- PIN_OFFSET_BIAS |
- PIN_HIGH);
+ ret = intel_context_active_acquire(ce);
if (ret)
goto err;
+ GEM_BUG_ON(!i915_vma_is_pinned(ce->state));
vaddr = i915_gem_object_pin_map(ce->state->obj,
i915_coherent_map_type(engine->i915) |
@@ -1501,6 +1799,11 @@ static int execlists_context_pin(struct intel_context *ce)
return __execlists_context_pin(ce, ce->engine);
}
+static int execlists_context_alloc(struct intel_context *ce)
+{
+ return __execlists_context_alloc(ce, ce->engine);
+}
+
static void execlists_context_reset(struct intel_context *ce)
{
/*
@@ -1524,6 +1827,8 @@ static void execlists_context_reset(struct intel_context *ce)
}
static const struct intel_context_ops execlists_context_ops = {
+ .alloc = execlists_context_alloc,
+
.pin = execlists_context_pin,
.unpin = execlists_context_unpin,
@@ -1569,8 +1874,7 @@ static int gen8_emit_init_breadcrumb(struct i915_request *rq)
static int emit_pdps(struct i915_request *rq)
{
const struct intel_engine_cs * const engine = rq->engine;
- struct i915_ppgtt * const ppgtt =
- i915_vm_to_ppgtt(rq->gem_context->vm);
+ struct i915_ppgtt * const ppgtt = i915_vm_to_ppgtt(rq->hw_context->vm);
int err, i;
u32 *cs;
@@ -1643,7 +1947,7 @@ static int execlists_request_alloc(struct i915_request *request)
*/
/* Unconditionally invalidate GPU caches and TLBs. */
- if (i915_vm_is_4lvl(request->gem_context->vm))
+ if (i915_vm_is_4lvl(request->hw_context->vm))
ret = request->engine->emit_flush(request, EMIT_INVALIDATE);
else
ret = emit_pdps(request);
@@ -1676,7 +1980,8 @@ gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *engine, u32 *batch)
/* NB no one else is allowed to scribble over scratch + 256! */
*batch++ = MI_STORE_REGISTER_MEM_GEN8 | MI_SRM_LRM_GLOBAL_GTT;
*batch++ = i915_mmio_reg_offset(GEN8_L3SQCREG4);
- *batch++ = i915_scratch_offset(engine->i915) + 256;
+ *batch++ = intel_gt_scratch_offset(engine->gt,
+ INTEL_GT_SCRATCH_FIELD_COHERENTL3_WA);
*batch++ = 0;
*batch++ = MI_LOAD_REGISTER_IMM(1);
@@ -1690,12 +1995,19 @@ gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *engine, u32 *batch)
*batch++ = MI_LOAD_REGISTER_MEM_GEN8 | MI_SRM_LRM_GLOBAL_GTT;
*batch++ = i915_mmio_reg_offset(GEN8_L3SQCREG4);
- *batch++ = i915_scratch_offset(engine->i915) + 256;
+ *batch++ = intel_gt_scratch_offset(engine->gt,
+ INTEL_GT_SCRATCH_FIELD_COHERENTL3_WA);
*batch++ = 0;
return batch;
}
+static u32 slm_offset(struct intel_engine_cs *engine)
+{
+ return intel_gt_scratch_offset(engine->gt,
+ INTEL_GT_SCRATCH_FIELD_CLEAR_SLM_WA);
+}
+
/*
* Typically we only have one indirect_ctx and per_ctx batch buffer which are
* initialized at the beginning and shared across all contexts but this field
@@ -1727,8 +2039,7 @@ static u32 *gen8_init_indirectctx_bb(struct intel_engine_cs *engine, u32 *batch)
PIPE_CONTROL_GLOBAL_GTT_IVB |
PIPE_CONTROL_CS_STALL |
PIPE_CONTROL_QW_WRITE,
- i915_scratch_offset(engine->i915) +
- 2 * CACHELINE_BYTES);
+ slm_offset(engine));
*batch++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
@@ -1874,7 +2185,7 @@ static int lrc_setup_wa_ctx(struct intel_engine_cs *engine)
if (IS_ERR(obj))
return PTR_ERR(obj);
- vma = i915_vma_instance(obj, &engine->i915->ggtt.vm, NULL);
+ vma = i915_vma_instance(obj, &engine->gt->ggtt->vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
goto err;
@@ -1914,6 +2225,7 @@ static int intel_init_workaround_bb(struct intel_engine_cs *engine)
return 0;
switch (INTEL_GEN(engine->i915)) {
+ case 12:
case 11:
return 0;
case 10:
@@ -1970,22 +2282,23 @@ static int intel_init_workaround_bb(struct intel_engine_cs *engine)
static void enable_execlists(struct intel_engine_cs *engine)
{
+ u32 mode;
+
+ assert_forcewakes_active(engine->uncore, FORCEWAKE_ALL);
+
intel_engine_set_hwsp_writemask(engine, ~0u); /* HWSTAM */
if (INTEL_GEN(engine->i915) >= 11)
- ENGINE_WRITE(engine,
- RING_MODE_GEN7,
- _MASKED_BIT_ENABLE(GEN11_GFX_DISABLE_LEGACY_MODE));
+ mode = _MASKED_BIT_ENABLE(GEN11_GFX_DISABLE_LEGACY_MODE);
else
- ENGINE_WRITE(engine,
- RING_MODE_GEN7,
- _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE));
+ mode = _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE);
+ ENGINE_WRITE_FW(engine, RING_MODE_GEN7, mode);
- ENGINE_WRITE(engine, RING_MI_MODE, _MASKED_BIT_DISABLE(STOP_RING));
+ ENGINE_WRITE_FW(engine, RING_MI_MODE, _MASKED_BIT_DISABLE(STOP_RING));
- ENGINE_WRITE(engine,
- RING_HWS_PGA,
- i915_ggtt_offset(engine->status_page.vma));
+ ENGINE_WRITE_FW(engine,
+ RING_HWS_PGA,
+ i915_ggtt_offset(engine->status_page.vma));
ENGINE_POSTING_READ(engine, RING_HWS_PGA);
}
@@ -1993,7 +2306,7 @@ static bool unexpected_starting_state(struct intel_engine_cs *engine)
{
bool unexpected = false;
- if (ENGINE_READ(engine, RING_MI_MODE) & STOP_RING) {
+ if (ENGINE_READ_FW(engine, RING_MI_MODE) & STOP_RING) {
DRM_DEBUG_DRIVER("STOP_RING still set in RING_MI_MODE\n");
unexpected = true;
}
@@ -2041,34 +2354,32 @@ static void execlists_reset_prepare(struct intel_engine_cs *engine)
__tasklet_disable_sync_once(&execlists->tasklet);
GEM_BUG_ON(!reset_in_progress(execlists));
- intel_engine_stop_cs(engine);
-
/* And flush any current direct submission. */
spin_lock_irqsave(&engine->active.lock, flags);
spin_unlock_irqrestore(&engine->active.lock, flags);
-}
-
-static bool lrc_regs_ok(const struct i915_request *rq)
-{
- const struct intel_ring *ring = rq->ring;
- const u32 *regs = rq->hw_context->lrc_reg_state;
-
- /* Quick spot check for the common signs of context corruption */
-
- if (regs[CTX_RING_BUFFER_CONTROL + 1] !=
- (RING_CTL_SIZE(ring->size) | RING_VALID))
- return false;
- if (regs[CTX_RING_BUFFER_START + 1] != i915_ggtt_offset(ring->vma))
- return false;
-
- return true;
+ /*
+ * We stop the engines first, otherwise we might get a failed reset
+ * and a dead gpu (on elk). Even a gpu as modern as kbl can suffer
+ * a system hang if a batchbuffer is progressing when the reset is
+ * issued, regardless of the READY_TO_RESET ack. Thus we assume it
+ * is best to stop the engines on all gens where we have a gpu
+ * reset.
+ *
+ * WaKBLVECSSemaphoreWaitPoll:kbl (on ALL_ENGINES)
+ *
+ * FIXME: Wa for more modern gens needs to be validated
+ */
+ intel_engine_stop_cs(engine);
}
-static void reset_csb_pointers(struct intel_engine_execlists *execlists)
+static void reset_csb_pointers(struct intel_engine_cs *engine)
{
+ struct intel_engine_execlists * const execlists = &engine->execlists;
const unsigned int reset_value = execlists->csb_size - 1;
+ ring_set_paused(engine, 0);
+
/*
* After a reset, the HW starts writing into CSB entry [0]. We
* therefore have to set our HEAD pointer back one entry so that
@@ -2088,15 +2399,15 @@ static void reset_csb_pointers(struct intel_engine_execlists *execlists)
static struct i915_request *active_request(struct i915_request *rq)
{
- const struct list_head * const list = &rq->engine->active.requests;
- const struct intel_context * const context = rq->hw_context;
+ const struct list_head * const list = &rq->timeline->requests;
+ const struct intel_context * const ce = rq->hw_context;
struct i915_request *active = NULL;
- list_for_each_entry_from_reverse(rq, list, sched.link) {
+ list_for_each_entry_from_reverse(rq, list, link) {
if (i915_request_completed(rq))
break;
- if (rq->hw_context != context)
+ if (rq->hw_context != ce)
break;
active = rq;
@@ -2115,33 +2426,27 @@ static void __execlists_reset(struct intel_engine_cs *engine, bool stalled)
process_csb(engine); /* drain preemption events */
/* Following the reset, we need to reload the CSB read/write pointers */
- reset_csb_pointers(&engine->execlists);
+ reset_csb_pointers(engine);
/*
* Save the currently executing context, even if we completed
* its request, it was still running at the time of the
* reset and will have been clobbered.
*/
- if (!port_isset(execlists->port))
- goto out_clear;
+ rq = execlists_active(execlists);
+ if (!rq)
+ goto unwind;
- rq = port_request(execlists->port);
ce = rq->hw_context;
-
- /*
- * Catch up with any missed context-switch interrupts.
- *
- * Ideally we would just read the remaining CSB entries now that we
- * know the gpu is idle. However, the CSB registers are sometimes^W
- * often trashed across a GPU reset! Instead we have to rely on
- * guessing the missed context-switch events by looking at what
- * requests were completed.
- */
- execlists_cancel_port_requests(execlists);
-
+ GEM_BUG_ON(i915_active_is_idle(&ce->active));
+ GEM_BUG_ON(!i915_vma_is_pinned(ce->state));
rq = active_request(rq);
- if (!rq)
+ if (!rq) {
+ ce->ring->head = ce->ring->tail;
goto out_replay;
+ }
+
+ ce->ring->head = intel_ring_wrap(ce->ring, rq->head);
/*
* If this request hasn't started yet, e.g. it is waiting on a
@@ -2155,7 +2460,7 @@ static void __execlists_reset(struct intel_engine_cs *engine, bool stalled)
* Otherwise, if we have not started yet, the request should replay
* perfectly and we do not need to flag the result as being erroneous.
*/
- if (!i915_request_started(rq) && lrc_regs_ok(rq))
+ if (!i915_request_started(rq))
goto out_replay;
/*
@@ -2169,8 +2474,8 @@ static void __execlists_reset(struct intel_engine_cs *engine, bool stalled)
* and have to at least restore the RING register in the context
* image back to the expected values to skip over the guilty request.
*/
- i915_reset_request(rq, stalled);
- if (!stalled && lrc_regs_ok(rq))
+ __i915_request_reset(rq, stalled);
+ if (!stalled)
goto out_replay;
/*
@@ -2190,17 +2495,15 @@ static void __execlists_reset(struct intel_engine_cs *engine, bool stalled)
execlists_init_reg_state(regs, ce, engine, ce->ring);
out_replay:
- /* Rerun the request; its payload has been neutered (if guilty). */
- ce->ring->head =
- rq ? intel_ring_wrap(ce->ring, rq->head) : ce->ring->tail;
+ GEM_TRACE("%s replay {head:%04x, tail:%04x\n",
+ engine->name, ce->ring->head, ce->ring->tail);
intel_ring_update_space(ce->ring);
__execlists_update_reg_state(ce, engine);
+unwind:
/* Push back any incomplete requests for replay after the reset. */
+ cancel_port_requests(execlists);
__unwind_incomplete_requests(engine);
-
-out_clear:
- execlists_clear_all_active(execlists);
}
static void execlists_reset(struct intel_engine_cs *engine, bool stalled)
@@ -2296,7 +2599,6 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine)
execlists->queue_priority_hint = INT_MIN;
execlists->queue = RB_ROOT_CACHED;
- GEM_BUG_ON(port_isset(execlists->port));
GEM_BUG_ON(__tasklet_is_enabled(&execlists->tasklet));
execlists->tasklet.func = nop_submission_tasklet;
@@ -2434,7 +2736,8 @@ static int gen8_emit_flush_render(struct i915_request *request,
{
struct intel_engine_cs *engine = request->engine;
u32 scratch_addr =
- i915_scratch_offset(engine->i915) + 2 * CACHELINE_BYTES;
+ intel_gt_scratch_offset(engine->gt,
+ INTEL_GT_SCRATCH_FIELD_RENDER_FLUSH);
bool vf_flush_wa = false, dc_flush_wa = false;
u32 *cs, flags = 0;
int len;
@@ -2499,6 +2802,63 @@ static int gen8_emit_flush_render(struct i915_request *request,
return 0;
}
+static int gen11_emit_flush_render(struct i915_request *request,
+ u32 mode)
+{
+ struct intel_engine_cs *engine = request->engine;
+ const u32 scratch_addr =
+ intel_gt_scratch_offset(engine->gt,
+ INTEL_GT_SCRATCH_FIELD_RENDER_FLUSH);
+
+ if (mode & EMIT_FLUSH) {
+ u32 *cs;
+ u32 flags = 0;
+
+ flags |= PIPE_CONTROL_CS_STALL;
+
+ flags |= PIPE_CONTROL_TILE_CACHE_FLUSH;
+ flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
+ flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
+ flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
+ flags |= PIPE_CONTROL_FLUSH_ENABLE;
+ flags |= PIPE_CONTROL_QW_WRITE;
+ flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
+
+ cs = intel_ring_begin(request, 6);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ cs = gen8_emit_pipe_control(cs, flags, scratch_addr);
+ intel_ring_advance(request, cs);
+ }
+
+ if (mode & EMIT_INVALIDATE) {
+ u32 *cs;
+ u32 flags = 0;
+
+ flags |= PIPE_CONTROL_CS_STALL;
+
+ flags |= PIPE_CONTROL_COMMAND_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_TLB_INVALIDATE;
+ flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_QW_WRITE;
+ flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
+
+ cs = intel_ring_begin(request, 6);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ cs = gen8_emit_pipe_control(cs, flags, scratch_addr);
+ intel_ring_advance(request, cs);
+ }
+
+ return 0;
+}
+
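For reference, callers drive this hook through the mode bits; a hypothetical call site requesting both passes might look like the sketch below (rq and err are assumed locals, the hook and flag names are from this diff):

	/* Sketch: emit a full flush followed by an invalidate on gen11+ */
	err = engine->emit_flush(rq, EMIT_FLUSH | EMIT_INVALIDATE);
	if (err)
		return err;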
/*
* Reserve space for 2 NOOPs at the end of each request to be
* used as a workaround for not being allowed to do lite
@@ -2514,15 +2874,28 @@ static u32 *gen8_emit_wa_tail(struct i915_request *request, u32 *cs)
return cs;
}
-static u32 *gen8_emit_fini_breadcrumb(struct i915_request *request, u32 *cs)
+static u32 *emit_preempt_busywait(struct i915_request *request, u32 *cs)
{
- cs = gen8_emit_ggtt_write(cs,
- request->fence.seqno,
- request->timeline->hwsp_offset,
- 0);
+ *cs++ = MI_SEMAPHORE_WAIT |
+ MI_SEMAPHORE_GLOBAL_GTT |
+ MI_SEMAPHORE_POLL |
+ MI_SEMAPHORE_SAD_EQ_SDD;
+ *cs++ = 0;
+ *cs++ = intel_hws_preempt_address(request->engine);
+ *cs++ = 0;
+
+ return cs;
+}
+static __always_inline u32*
+gen8_emit_fini_breadcrumb_footer(struct i915_request *request,
+ u32 *cs)
+{
*cs++ = MI_USER_INTERRUPT;
+
*cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
+ if (intel_engine_has_semaphores(request->engine))
+ cs = emit_preempt_busywait(request, cs);
request->tail = intel_ring_offset(request, cs);
assert_ring_tail_valid(request->ring, request->tail);
@@ -2530,51 +2903,53 @@ static u32 *gen8_emit_fini_breadcrumb(struct i915_request *request, u32 *cs)
return gen8_emit_wa_tail(request, cs);
}
+static u32 *gen8_emit_fini_breadcrumb(struct i915_request *request, u32 *cs)
+{
+ cs = gen8_emit_ggtt_write(cs,
+ request->fence.seqno,
+ request->timeline->hwsp_offset,
+ 0);
+
+ return gen8_emit_fini_breadcrumb_footer(request, cs);
+}
+
static u32 *gen8_emit_fini_breadcrumb_rcs(struct i915_request *request, u32 *cs)
{
- /* XXX flush+write+CS_STALL all in one upsets gem_concurrent_blt:kbl */
cs = gen8_emit_ggtt_write_rcs(cs,
request->fence.seqno,
request->timeline->hwsp_offset,
PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH |
PIPE_CONTROL_DEPTH_CACHE_FLUSH |
PIPE_CONTROL_DC_FLUSH_ENABLE);
+
+ /* XXX flush+write+CS_STALL all in one upsets gem_concurrent_blt:kbl */
cs = gen8_emit_pipe_control(cs,
PIPE_CONTROL_FLUSH_ENABLE |
PIPE_CONTROL_CS_STALL,
0);
- *cs++ = MI_USER_INTERRUPT;
- *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
-
- request->tail = intel_ring_offset(request, cs);
- assert_ring_tail_valid(request->ring, request->tail);
-
- return gen8_emit_wa_tail(request, cs);
+ return gen8_emit_fini_breadcrumb_footer(request, cs);
}
-static int gen8_init_rcs_context(struct i915_request *rq)
+static u32 *gen11_emit_fini_breadcrumb_rcs(struct i915_request *request,
+ u32 *cs)
{
- int ret;
-
- ret = intel_engine_emit_ctx_wa(rq);
- if (ret)
- return ret;
-
- ret = intel_rcs_context_init_mocs(rq);
- /*
- * Failing to program the MOCS is non-fatal.The system will not
- * run at peak performance. So generate an error and carry on.
- */
- if (ret)
- DRM_ERROR("MOCS failed to program: expect performance issues.\n");
+ cs = gen8_emit_ggtt_write_rcs(cs,
+ request->fence.seqno,
+ request->timeline->hwsp_offset,
+ PIPE_CONTROL_CS_STALL |
+ PIPE_CONTROL_TILE_CACHE_FLUSH |
+ PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH |
+ PIPE_CONTROL_DEPTH_CACHE_FLUSH |
+ PIPE_CONTROL_DC_FLUSH_ENABLE |
+ PIPE_CONTROL_FLUSH_ENABLE);
- return i915_gem_render_state_emit(rq);
+ return gen8_emit_fini_breadcrumb_footer(request, cs);
}
static void execlists_park(struct intel_engine_cs *engine)
{
- intel_engine_park(engine);
+ del_timer(&engine->execlists.timer);
}
void intel_execlists_set_default_submission(struct intel_engine_cs *engine)
@@ -2592,11 +2967,11 @@ void intel_execlists_set_default_submission(struct intel_engine_cs *engine)
engine->unpark = NULL;
engine->flags |= I915_ENGINE_SUPPORTS_STATS;
- if (!intel_vgpu_active(engine->i915))
+ if (!intel_vgpu_active(engine->i915)) {
engine->flags |= I915_ENGINE_HAS_SEMAPHORES;
- if (engine->preempt_context &&
- HAS_LOGICAL_RING_PREEMPTION(engine->i915))
- engine->flags |= I915_ENGINE_HAS_PREEMPTION;
+ if (HAS_LOGICAL_RING_PREEMPTION(engine->i915))
+ engine->flags |= I915_ENGINE_HAS_PREEMPTION;
+ }
}
static void execlists_destroy(struct intel_engine_cs *engine)
@@ -2665,22 +3040,32 @@ logical_ring_default_irqs(struct intel_engine_cs *engine)
engine->irq_keep_mask = GT_CONTEXT_SWITCH_INTERRUPT << shift;
}
-int intel_execlists_submission_setup(struct intel_engine_cs *engine)
+static void rcs_submission_override(struct intel_engine_cs *engine)
{
- /* Intentionally left blank. */
- engine->buffer = NULL;
+ switch (INTEL_GEN(engine->i915)) {
+ case 12:
+ case 11:
+ engine->emit_flush = gen11_emit_flush_render;
+ engine->emit_fini_breadcrumb = gen11_emit_fini_breadcrumb_rcs;
+ break;
+ default:
+ engine->emit_flush = gen8_emit_flush_render;
+ engine->emit_fini_breadcrumb = gen8_emit_fini_breadcrumb_rcs;
+ break;
+ }
+}
+int intel_execlists_submission_setup(struct intel_engine_cs *engine)
+{
tasklet_init(&engine->execlists.tasklet,
execlists_submission_tasklet, (unsigned long)engine);
+ timer_setup(&engine->execlists.timer, execlists_submission_timer, 0);
logical_ring_default_vfuncs(engine);
logical_ring_default_irqs(engine);
- if (engine->class == RENDER_CLASS) {
- engine->init_context = gen8_init_rcs_context;
- engine->emit_flush = gen8_emit_flush_render;
- engine->emit_fini_breadcrumb = gen8_emit_fini_breadcrumb_rcs;
- }
+ if (engine->class == RENDER_CLASS)
+ rcs_submission_override(engine);
return 0;
}
@@ -2697,9 +3082,6 @@ int intel_execlists_submission_init(struct intel_engine_cs *engine)
if (ret)
return ret;
- intel_engine_init_workarounds(engine);
- intel_engine_init_whitelist(engine);
-
if (intel_init_workaround_bb(engine))
/*
* We continue even if we fail to initialize WA batch
@@ -2718,11 +3100,6 @@ int intel_execlists_submission_init(struct intel_engine_cs *engine)
i915_mmio_reg_offset(RING_ELSP(base));
}
- execlists->preempt_complete_status = ~0u;
- if (engine->preempt_context)
- execlists->preempt_complete_status =
- upper_32_bits(engine->preempt_context->lrc_desc);
-
execlists->csb_status =
&engine->status_page.addr[I915_HWS_CSB_BUF0_INDEX];
@@ -2734,7 +3111,7 @@ int intel_execlists_submission_init(struct intel_engine_cs *engine)
else
execlists->csb_size = GEN11_CSB_ENTRIES;
- reset_csb_pointers(execlists);
+ reset_csb_pointers(engine);
return 0;
}
@@ -2747,6 +3124,10 @@ static u32 intel_lr_indirect_ctx_offset(struct intel_engine_cs *engine)
default:
MISSING_CASE(INTEL_GEN(engine->i915));
/* fall through */
+ case 12:
+ indirect_ctx_offset =
+ GEN12_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT;
+ break;
case 11:
indirect_ctx_offset =
GEN11_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT;
@@ -2773,7 +3154,7 @@ static void execlists_init_reg_state(u32 *regs,
struct intel_engine_cs *engine,
struct intel_ring *ring)
{
- struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(ce->gem_context->vm);
+ struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(ce->vm);
bool rcs = engine->class == RENDER_CLASS;
u32 base = engine->mmio_base;
@@ -2864,8 +3245,6 @@ static void execlists_init_reg_state(u32 *regs,
if (rcs) {
regs[CTX_LRI_HEADER_2] = MI_LOAD_REGISTER_IMM(1);
CTX_REG(regs, CTX_R_PWR_CLK_STATE, GEN8_R_PWR_CLK_STATE, 0);
-
- i915_oa_init_reg_state(engine, ce, regs);
}
regs[CTX_END] = MI_BATCH_BUFFER_END;
@@ -2890,6 +3269,8 @@ populate_lr_context(struct intel_context *ce,
return ret;
}
+ set_redzone(vaddr, engine);
+
if (engine->default_state) {
/*
* We only want to copy over the template context state;
@@ -2917,11 +3298,6 @@ populate_lr_context(struct intel_context *ce,
if (!engine->default_state)
regs[CTX_CONTEXT_CONTROL + 1] |=
_MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT);
- if (ce->gem_context == engine->i915->preempt_context &&
- INTEL_GEN(engine->i915) < 11)
- regs[CTX_CONTEXT_CONTROL + 1] |=
- _MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT |
- CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT);
ret = 0;
err_unpin_ctx:
@@ -2932,27 +3308,16 @@ err_unpin_ctx:
return ret;
}
-static struct i915_timeline *get_timeline(struct i915_gem_context *ctx)
-{
- if (ctx->timeline)
- return i915_timeline_get(ctx->timeline);
- else
- return i915_timeline_create(ctx->i915, NULL);
-}
-
-static int execlists_context_deferred_alloc(struct intel_context *ce,
- struct intel_engine_cs *engine)
+static int __execlists_context_alloc(struct intel_context *ce,
+ struct intel_engine_cs *engine)
{
struct drm_i915_gem_object *ctx_obj;
+ struct intel_ring *ring;
struct i915_vma *vma;
u32 context_size;
- struct intel_ring *ring;
- struct i915_timeline *timeline;
int ret;
- if (ce->state)
- return 0;
-
+ GEM_BUG_ON(ce->state);
context_size = round_up(engine->context_size, I915_GTT_PAGE_SIZE);
/*
@@ -2960,27 +3325,32 @@ static int execlists_context_deferred_alloc(struct intel_context *ce,
* for our own use and for sharing with the GuC.
*/
context_size += LRC_HEADER_PAGES * PAGE_SIZE;
+ if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
+ context_size += I915_GTT_PAGE_SIZE; /* for redzone */
ctx_obj = i915_gem_object_create_shmem(engine->i915, context_size);
if (IS_ERR(ctx_obj))
return PTR_ERR(ctx_obj);
- vma = i915_vma_instance(ctx_obj, &engine->i915->ggtt.vm, NULL);
+ vma = i915_vma_instance(ctx_obj, &engine->gt->ggtt->vm, NULL);
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
goto error_deref_obj;
}
- timeline = get_timeline(ce->gem_context);
- if (IS_ERR(timeline)) {
- ret = PTR_ERR(timeline);
- goto error_deref_obj;
+ if (!ce->timeline) {
+ struct intel_timeline *tl;
+
+ tl = intel_timeline_create(engine->gt, NULL);
+ if (IS_ERR(tl)) {
+ ret = PTR_ERR(tl);
+ goto error_deref_obj;
+ }
+
+ ce->timeline = tl;
}
- ring = intel_engine_create_ring(engine,
- timeline,
- ce->gem_context->ring_size);
- i915_timeline_put(timeline);
+ ring = intel_engine_create_ring(engine, (unsigned long)ce->ring);
if (IS_ERR(ring)) {
ret = PTR_ERR(ring);
goto error_deref_obj;
@@ -3038,6 +3408,7 @@ static void virtual_context_destroy(struct kref *kref)
if (ve->context.state)
__execlists_context_fini(&ve->context);
+ intel_context_fini(&ve->context);
kfree(ve->bonds);
kfree(ve);
@@ -3090,6 +3461,8 @@ static void virtual_context_enter(struct intel_context *ce)
for (n = 0; n < ve->num_siblings; n++)
intel_engine_pm_get(ve->siblings[n]);
+
+ intel_timeline_enter(ce->timeline);
}
static void virtual_context_exit(struct intel_context *ce)
@@ -3097,6 +3470,8 @@ static void virtual_context_exit(struct intel_context *ce)
struct virtual_engine *ve = container_of(ce, typeof(*ve), context);
unsigned int n;
+ intel_timeline_exit(ce->timeline);
+
for (n = 0; n < ve->num_siblings; n++)
intel_engine_pm_put(ve->siblings[n]);
}
@@ -3290,11 +3665,11 @@ intel_execlists_create_virtual(struct i915_gem_context *ctx,
return ERR_PTR(-ENOMEM);
ve->base.i915 = ctx->i915;
+ ve->base.gt = siblings[0]->gt;
ve->base.id = -1;
ve->base.class = OTHER_CLASS;
ve->base.uabi_class = I915_ENGINE_CLASS_INVALID;
ve->base.instance = I915_ENGINE_CLASS_INVALID_VIRTUAL;
- ve->base.flags = I915_ENGINE_IS_VIRTUAL;
/*
* The decision on whether to submit a request using semaphores
@@ -3391,8 +3766,18 @@ intel_execlists_create_virtual(struct i915_gem_context *ctx,
ve->base.emit_fini_breadcrumb = sibling->emit_fini_breadcrumb;
ve->base.emit_fini_breadcrumb_dw =
sibling->emit_fini_breadcrumb_dw;
+
+ ve->base.flags = sibling->flags;
}
+ ve->base.flags |= I915_ENGINE_IS_VIRTUAL;
+
+ err = __execlists_context_alloc(&ve->context, siblings[0]);
+ if (err)
+ goto err_put;
+
+ __set_bit(CONTEXT_ALLOC_BIT, &ve->context.flags);
+
return &ve->context;
err_put:
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc_reg.h b/drivers/gpu/drm/i915/gt/intel_lrc_reg.h
index 6bf34738b4e5..b8f20ad71169 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc_reg.h
+++ b/drivers/gpu/drm/i915/gt/intel_lrc_reg.h
@@ -64,5 +64,6 @@
#define GEN9_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT 0x26
#define GEN10_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT 0x19
#define GEN11_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT 0x1A
+#define GEN12_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT 0xD
#endif /* _INTEL_LRC_REG_H_ */
diff --git a/drivers/gpu/drm/i915/gt/intel_mocs.c b/drivers/gpu/drm/i915/gt/intel_mocs.c
index 1f9db50b1869..728704bbbe18 100644
--- a/drivers/gpu/drm/i915/gt/intel_mocs.c
+++ b/drivers/gpu/drm/i915/gt/intel_mocs.c
@@ -23,6 +23,7 @@
#include "i915_drv.h"
#include "intel_engine.h"
+#include "intel_gt.h"
#include "intel_mocs.h"
#include "intel_lrc.h"
@@ -61,6 +62,10 @@ struct drm_i915_mocs_table {
#define GEN11_NUM_MOCS_ENTRIES 64 /* 63-64 are reserved, but configured. */
/* (e)LLC caching options */
+/*
+ * Note: LE_0_PAGETABLE works only up to Gen11; for newer gens it means
+ * the same as LE_UC
+ */
#define LE_0_PAGETABLE _LE_CACHEABILITY(0)
#define LE_1_UC _LE_CACHEABILITY(1)
#define LE_2_WT _LE_CACHEABILITY(2)
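As an illustration of the note above (hypothetical entries, not part of any table in this diff), on Gen12+ the two control values below program identical uncached behaviour, which is why the Tigerlake table avoids LE_0_PAGETABLE:

	MOCS_ENTRY(3, LE_0_PAGETABLE | LE_TC_1_LLC, L3_1_UC),	/* acts as UC on Gen12+ */
	MOCS_ENTRY(4, LE_1_UC | LE_TC_1_LLC, L3_1_UC),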
@@ -99,8 +104,9 @@ struct drm_i915_mocs_table {
* of bspec.
*
* Entries not part of the following tables are undefined as far as
- * userspace is concerned and shouldn't be relied upon. For the time
- * being they will be initialized to PTE.
+ * userspace is concerned and shouldn't be relied upon. For Gen < 12
+ they will be initialized to PTE. From Gen 12 onwards there is no
+ PTE setting, so they will be initialized to an invalid value instead.
*
* The last two entries are reserved by the hardware. For ICL+ they
* should be initialized according to bspec and never used, for older
@@ -136,14 +142,7 @@ static const struct drm_i915_mocs_entry broxton_mocs_table[] = {
};
#define GEN11_MOCS_ENTRIES \
- /* Base - Uncached (Deprecated) */ \
- MOCS_ENTRY(I915_MOCS_UNCACHED, \
- LE_1_UC | LE_TC_1_LLC, \
- L3_1_UC), \
- /* Base - L3 + LeCC:PAT (Deprecated) */ \
- MOCS_ENTRY(I915_MOCS_PTE, \
- LE_0_PAGETABLE | LE_TC_1_LLC, \
- L3_3_WB), \
+ /* Entries 0 and 1 are defined per-platform */ \
/* Base - L3 + LLC */ \
MOCS_ENTRY(2, \
LE_3_WB | LE_TC_1_LLC | LE_LRUM(3), \
@@ -241,49 +240,86 @@ static const struct drm_i915_mocs_entry broxton_mocs_table[] = {
LE_3_WB | LE_TC_1_LLC | LE_LRUM(3), \
L3_1_UC)
+static const struct drm_i915_mocs_entry tigerlake_mocs_table[] = {
+ /* Base - Error (Reserved for Non-Use) */
+ MOCS_ENTRY(0, 0x0, 0x0),
+ /* Base - Reserved */
+ MOCS_ENTRY(1, 0x0, 0x0),
+
+ GEN11_MOCS_ENTRIES,
+
+ /* Implicitly enable L1 - HDC:L1 + L3 + LLC */
+ MOCS_ENTRY(48,
+ LE_3_WB | LE_TC_1_LLC | LE_LRUM(3),
+ L3_3_WB),
+ /* Implicitly enable L1 - HDC:L1 + L3 */
+ MOCS_ENTRY(49,
+ LE_1_UC | LE_TC_1_LLC,
+ L3_3_WB),
+ /* Implicitly enable L1 - HDC:L1 + LLC */
+ MOCS_ENTRY(50,
+ LE_3_WB | LE_TC_1_LLC | LE_LRUM(3),
+ L3_1_UC),
+ /* Implicitly enable L1 - HDC:L1 */
+ MOCS_ENTRY(51,
+ LE_1_UC | LE_TC_1_LLC,
+ L3_1_UC),
+ /* HW Special Case (CCS) */
+ MOCS_ENTRY(60,
+ LE_3_WB | LE_TC_1_LLC | LE_LRUM(3),
+ L3_1_UC),
+ /* HW Special Case (Displayable) */
+ MOCS_ENTRY(61,
+ LE_1_UC | LE_TC_1_LLC | LE_SCF(1),
+ L3_3_WB),
+};
+
static const struct drm_i915_mocs_entry icelake_mocs_table[] = {
+ /* Base - Uncached (Deprecated) */
+ MOCS_ENTRY(I915_MOCS_UNCACHED,
+ LE_1_UC | LE_TC_1_LLC,
+ L3_1_UC),
+ /* Base - L3 + LeCC:PAT (Deprecated) */
+ MOCS_ENTRY(I915_MOCS_PTE,
+ LE_0_PAGETABLE | LE_TC_1_LLC,
+ L3_3_WB),
+
GEN11_MOCS_ENTRIES
};
-/**
- * get_mocs_settings()
- * @dev_priv: i915 device.
- * @table: Output table that will be made to point at appropriate
- * MOCS values for the device.
- *
- * This function will return the values of the MOCS table that needs to
- * be programmed for the platform. It will return the values that need
- * to be programmed and if they need to be programmed.
- *
- * Return: true if there are applicable MOCS settings for the device.
- */
-static bool get_mocs_settings(struct drm_i915_private *dev_priv,
+static bool get_mocs_settings(struct intel_gt *gt,
struct drm_i915_mocs_table *table)
{
+ struct drm_i915_private *i915 = gt->i915;
bool result = false;
- if (INTEL_GEN(dev_priv) >= 11) {
+ if (INTEL_GEN(i915) >= 12) {
+ table->size = ARRAY_SIZE(tigerlake_mocs_table);
+ table->table = tigerlake_mocs_table;
+ table->n_entries = GEN11_NUM_MOCS_ENTRIES;
+ result = true;
+ } else if (IS_GEN(i915, 11)) {
table->size = ARRAY_SIZE(icelake_mocs_table);
table->table = icelake_mocs_table;
table->n_entries = GEN11_NUM_MOCS_ENTRIES;
result = true;
- } else if (IS_GEN9_BC(dev_priv) || IS_CANNONLAKE(dev_priv)) {
+ } else if (IS_GEN9_BC(i915) || IS_CANNONLAKE(i915)) {
table->size = ARRAY_SIZE(skylake_mocs_table);
table->n_entries = GEN9_NUM_MOCS_ENTRIES;
table->table = skylake_mocs_table;
result = true;
- } else if (IS_GEN9_LP(dev_priv)) {
+ } else if (IS_GEN9_LP(i915)) {
table->size = ARRAY_SIZE(broxton_mocs_table);
table->n_entries = GEN9_NUM_MOCS_ENTRIES;
table->table = broxton_mocs_table;
result = true;
} else {
- WARN_ONCE(INTEL_GEN(dev_priv) >= 9,
+ WARN_ONCE(INTEL_GEN(i915) >= 9,
"Platform that should have a MOCS table does not.\n");
}
/* WaDisableSkipCaching:skl,bxt,kbl,glk */
- if (IS_GEN(dev_priv, 9)) {
+ if (IS_GEN(i915, 9)) {
int i;
for (i = 0; i < table->size; i++)
@@ -338,12 +374,20 @@ static u32 get_entry_control(const struct drm_i915_mocs_table *table,
*/
void intel_mocs_init_engine(struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv = engine->i915;
+ struct intel_gt *gt = engine->gt;
+ struct intel_uncore *uncore = gt->uncore;
struct drm_i915_mocs_table table;
unsigned int index;
u32 unused_value;
- if (!get_mocs_settings(dev_priv, &table))
+ /* Platforms with global MOCS do not need per-engine initialization. */
+ if (HAS_GLOBAL_MOCS_REGISTERS(gt->i915))
+ return;
+
+ /* Called under a blanket forcewake */
+ assert_forcewakes_active(uncore, FORCEWAKE_ALL);
+
+ if (!get_mocs_settings(gt, &table))
return;
/* Set unused values to PTE */
@@ -352,24 +396,48 @@ void intel_mocs_init_engine(struct intel_engine_cs *engine)
for (index = 0; index < table.size; index++) {
u32 value = get_entry_control(&table, index);
- I915_WRITE(mocs_register(engine->id, index), value);
+ intel_uncore_write_fw(uncore,
+ mocs_register(engine->id, index),
+ value);
}
/* All remaining entries are also unused */
for (; index < table.n_entries; index++)
- I915_WRITE(mocs_register(engine->id, index), unused_value);
+ intel_uncore_write_fw(uncore,
+ mocs_register(engine->id, index),
+ unused_value);
+}
+
+static void intel_mocs_init_global(struct intel_gt *gt)
+{
+ struct intel_uncore *uncore = gt->uncore;
+ struct drm_i915_mocs_table table;
+ unsigned int index;
+
+ GEM_BUG_ON(!HAS_GLOBAL_MOCS_REGISTERS(gt->i915));
+
+ if (!get_mocs_settings(gt, &table))
+ return;
+
+ if (GEM_DEBUG_WARN_ON(table.size > table.n_entries))
+ return;
+
+ for (index = 0; index < table.size; index++)
+ intel_uncore_write(uncore,
+ GEN12_GLOBAL_MOCS(index),
+ table.table[index].control_value);
+
+ /*
+ * Now set the unused entries to the invalid entry (index 0). These
+ * entries are officially undefined: no contract is given for their
+ * contents or settings.
+ */
+ for (; index < table.n_entries; index++)
+ intel_uncore_write(uncore,
+ GEN12_GLOBAL_MOCS(index),
+ table.table[0].control_value);
}
-/**
- * emit_mocs_control_table() - emit the mocs control table
- * @rq: Request to set up the MOCS table for.
- * @table: The values to program into the control regs.
- *
- * This function simply emits a MI_LOAD_REGISTER_IMM command for the
- * given table starting at the given address.
- *
- * Return: 0 on success, otherwise the error status.
- */
static int emit_mocs_control_table(struct i915_request *rq,
const struct drm_i915_mocs_table *table)
{
@@ -429,17 +497,6 @@ static inline u32 l3cc_combine(const struct drm_i915_mocs_table *table,
return low | high << 16;
}
-/**
- * emit_mocs_l3cc_table() - emit the mocs control table
- * @rq: Request to set up the MOCS table for.
- * @table: The values to program into the control regs.
- *
- * This function simply emits a MI_LOAD_REGISTER_IMM command for the
- * given table starting at the given address. This register set is
- * programmed in pairs.
- *
- * Return: 0 on success, otherwise the error status.
- */
static int emit_mocs_l3cc_table(struct i915_request *rq,
const struct drm_i915_mocs_table *table)
{
@@ -488,27 +545,14 @@ static int emit_mocs_l3cc_table(struct i915_request *rq,
return 0;
}
-/**
- * intel_mocs_init_l3cc_table() - program the mocs control table
- * @dev_priv: i915 device private
- *
- * This function simply programs the mocs registers for the given table
- * starting at the given address. This register set is programmed in pairs.
- *
- * These registers may get programmed more than once, it is simpler to
- * re-program 32 registers than maintain the state of when they were programmed.
- * We are always reprogramming with the same values and this only on context
- * start.
- *
- * Return: Nothing.
- */
-void intel_mocs_init_l3cc_table(struct drm_i915_private *dev_priv)
+static void intel_mocs_init_l3cc_table(struct intel_gt *gt)
{
+ struct intel_uncore *uncore = gt->uncore;
struct drm_i915_mocs_table table;
unsigned int i;
u16 unused_value;
- if (!get_mocs_settings(dev_priv, &table))
+ if (!get_mocs_settings(gt, &table))
return;
/* Set unused values to PTE */
@@ -518,28 +562,32 @@ void intel_mocs_init_l3cc_table(struct drm_i915_private *dev_priv)
u16 low = get_entry_l3cc(&table, 2 * i);
u16 high = get_entry_l3cc(&table, 2 * i + 1);
- I915_WRITE(GEN9_LNCFCMOCS(i),
- l3cc_combine(&table, low, high));
+ intel_uncore_write(uncore,
+ GEN9_LNCFCMOCS(i),
+ l3cc_combine(&table, low, high));
}
/* Odd table size - 1 left over */
if (table.size & 0x01) {
u16 low = get_entry_l3cc(&table, 2 * i);
- I915_WRITE(GEN9_LNCFCMOCS(i),
- l3cc_combine(&table, low, unused_value));
+ intel_uncore_write(uncore,
+ GEN9_LNCFCMOCS(i),
+ l3cc_combine(&table, low, unused_value));
i++;
}
/* All remaining entries are also unused */
for (; i < table.n_entries / 2; i++)
- I915_WRITE(GEN9_LNCFCMOCS(i),
- l3cc_combine(&table, unused_value, unused_value));
+ intel_uncore_write(uncore,
+ GEN9_LNCFCMOCS(i),
+ l3cc_combine(&table, unused_value,
+ unused_value));
}
/**
- * intel_rcs_context_init_mocs() - program the MOCS register.
- * @rq: Request to set up the MOCS tables for.
+ * intel_mocs_emit() - program the MOCS register.
+ * @rq: Request to use to set up the MOCS tables.
*
* This function will emit a batch buffer with the values required for
* programming the MOCS register values for all the currently supported
@@ -553,12 +601,16 @@ void intel_mocs_init_l3cc_table(struct drm_i915_private *dev_priv)
*
* Return: 0 on success, otherwise the error status.
*/
-int intel_rcs_context_init_mocs(struct i915_request *rq)
+int intel_mocs_emit(struct i915_request *rq)
{
struct drm_i915_mocs_table t;
int ret;
- if (get_mocs_settings(rq->i915, &t)) {
+ if (HAS_GLOBAL_MOCS_REGISTERS(rq->i915) ||
+ rq->engine->class != RENDER_CLASS)
+ return 0;
+
+ if (get_mocs_settings(rq->engine->gt, &t)) {
/* Program the RCS control registers */
ret = emit_mocs_control_table(rq, &t);
if (ret)
@@ -572,3 +624,11 @@ int intel_rcs_context_init_mocs(struct i915_request *rq)
return 0;
}
+
+void intel_mocs_init(struct intel_gt *gt)
+{
+ intel_mocs_init_l3cc_table(gt);
+
+ if (HAS_GLOBAL_MOCS_REGISTERS(gt->i915))
+ intel_mocs_init_global(gt);
+}
diff --git a/drivers/gpu/drm/i915/gt/intel_mocs.h b/drivers/gpu/drm/i915/gt/intel_mocs.h
index 0913704a1af2..2ae816b7ca19 100644
--- a/drivers/gpu/drm/i915/gt/intel_mocs.h
+++ b/drivers/gpu/drm/i915/gt/intel_mocs.h
@@ -49,12 +49,13 @@
* context handling keep the MOCS in step.
*/
-struct drm_i915_private;
struct i915_request;
struct intel_engine_cs;
+struct intel_gt;
-int intel_rcs_context_init_mocs(struct i915_request *rq);
-void intel_mocs_init_l3cc_table(struct drm_i915_private *dev_priv);
+void intel_mocs_init(struct intel_gt *gt);
void intel_mocs_init_engine(struct intel_engine_cs *engine);
+int intel_mocs_emit(struct i915_request *rq);
+
#endif
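Taken together, the reworked interface splits into a one-time GT-level init and an optional per-request emit. A minimal sketch of the assumed call sites follows (illustrative only; the actual callers are not shown in this diff):

	/* GT bring-up: programs L3CC and, on Gen12+, the global MOCS registers */
	intel_mocs_init(gt);

	/* Render context init: no-op on global-MOCS platforms or non-RCS engines */
	err = intel_mocs_emit(rq);
	if (err)
		return err;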
diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.c b/drivers/gpu/drm/i915/gt/intel_renderstate.c
index 4ee032072d4f..6d05f9c64178 100644
--- a/drivers/gpu/drm/i915/i915_gem_render_state.c
+++ b/drivers/gpu/drm/i915/gt/intel_renderstate.c
@@ -26,10 +26,9 @@
*/
#include "i915_drv.h"
-#include "i915_gem_render_state.h"
#include "intel_renderstate.h"
-struct intel_render_state {
+struct intel_renderstate {
const struct intel_renderstate_rodata *rodata;
struct drm_i915_gem_object *obj;
struct i915_vma *vma;
@@ -42,7 +41,7 @@ struct intel_render_state {
static const struct intel_renderstate_rodata *
render_state_get_rodata(const struct intel_engine_cs *engine)
{
- if (engine->id != RCS0)
+ if (engine->class != RENDER_CLASS)
return NULL;
switch (INTEL_GEN(engine->i915)) {
@@ -75,7 +74,7 @@ render_state_get_rodata(const struct intel_engine_cs *engine)
(batch)[(i)++] = (val); \
} while(0)
-static int render_state_setup(struct intel_render_state *so,
+static int render_state_setup(struct intel_renderstate *so,
struct drm_i915_private *i915)
{
const struct intel_renderstate_rodata *rodata = so->rodata;
@@ -177,10 +176,10 @@ err:
#undef OUT_BATCH
-int i915_gem_render_state_emit(struct i915_request *rq)
+int intel_renderstate_emit(struct i915_request *rq)
{
struct intel_engine_cs *engine = rq->engine;
- struct intel_render_state so = {}; /* keep the compiler happy */
+ struct intel_renderstate so = {}; /* keep the compiler happy */
int err;
so.rodata = render_state_get_rodata(engine);
@@ -194,7 +193,7 @@ int i915_gem_render_state_emit(struct i915_request *rq)
if (IS_ERR(so.obj))
return PTR_ERR(so.obj);
- so.vma = i915_vma_instance(so.obj, &engine->i915->ggtt.vm, NULL);
+ so.vma = i915_vma_instance(so.obj, &engine->gt->ggtt->vm, NULL);
if (IS_ERR(so.vma)) {
err = PTR_ERR(so.vma);
goto err_obj;
@@ -223,7 +222,9 @@ int i915_gem_render_state_emit(struct i915_request *rq)
}
i915_vma_lock(so.vma);
- err = i915_vma_move_to_active(so.vma, rq, 0);
+ err = i915_request_await_object(rq, so.vma->obj, false);
+ if (err == 0)
+ err = i915_vma_move_to_active(so.vma, rq, 0);
i915_vma_unlock(so.vma);
err_unpin:
i915_vma_unpin(so.vma);
diff --git a/drivers/gpu/drm/i915/intel_renderstate.h b/drivers/gpu/drm/i915/gt/intel_renderstate.h
index 08f6fea05a2c..8d5079145054 100644
--- a/drivers/gpu/drm/i915/intel_renderstate.h
+++ b/drivers/gpu/drm/i915/gt/intel_renderstate.h
@@ -21,11 +21,13 @@
* DEALINGS IN THE SOFTWARE.
*/
-#ifndef _INTEL_RENDERSTATE_H
-#define _INTEL_RENDERSTATE_H
+#ifndef _INTEL_RENDERSTATE_H_
+#define _INTEL_RENDERSTATE_H_
#include <linux/types.h>
+struct i915_request;
+
struct intel_renderstate_rodata {
const u32 *reloc;
const u32 *batch;
@@ -44,4 +46,6 @@ extern const struct intel_renderstate_rodata gen7_null_state;
extern const struct intel_renderstate_rodata gen8_null_state;
extern const struct intel_renderstate_rodata gen9_null_state;
-#endif /* INTEL_RENDERSTATE_H */
+int intel_renderstate_emit(struct i915_request *rq);
+
+#endif /* _INTEL_RENDERSTATE_H_ */
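For context, the renamed intel_renderstate_emit() takes over the role of the old i915_gem_render_state_emit(). A hypothetical context-init hook using it, modelled on the gen8_init_rcs_context() removed earlier in this series (a sketch, not a definitive caller):

	static int example_init_rcs_context(struct i915_request *rq)
	{
		int err;

		/* Apply context workarounds before emitting the null render state */
		err = intel_engine_emit_ctx_wa(rq);
		if (err)
			return err;

		return intel_renderstate_emit(rq);
	}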
diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c
index 3f907701ef4d..b9d84d52e986 100644
--- a/drivers/gpu/drm/i915/gt/intel_reset.c
+++ b/drivers/gpu/drm/i915/gt/intel_reset.c
@@ -7,6 +7,7 @@
#include <linux/sched/mm.h>
#include <linux/stop_machine.h>
+#include "display/intel_display_types.h"
#include "display/intel_overlay.h"
#include "gem/i915_gem_context.h"
@@ -15,26 +16,17 @@
#include "i915_gpu_error.h"
#include "i915_irq.h"
#include "intel_engine_pm.h"
+#include "intel_gt.h"
#include "intel_gt_pm.h"
#include "intel_reset.h"
-#include "intel_guc.h"
+#include "uc/intel_guc.h"
#define RESET_MAX_RETRIES 3
/* XXX How to handle concurrent GGTT updates using tiling registers? */
#define RESET_UNDER_STOP_MACHINE 0
-static void rmw_set(struct intel_uncore *uncore, i915_reg_t reg, u32 set)
-{
- intel_uncore_rmw(uncore, reg, 0, set);
-}
-
-static void rmw_clear(struct intel_uncore *uncore, i915_reg_t reg, u32 clr)
-{
- intel_uncore_rmw(uncore, reg, clr, 0);
-}
-
static void rmw_set_fw(struct intel_uncore *uncore, i915_reg_t reg, u32 set)
{
intel_uncore_rmw_fw(uncore, reg, 0, set);
@@ -123,7 +115,7 @@ static void context_mark_innocent(struct i915_gem_context *ctx)
atomic_inc(&ctx->active_count);
}
-void i915_reset_request(struct i915_request *rq, bool guilty)
+void __i915_request_reset(struct i915_request *rq, bool guilty)
{
GEM_TRACE("%s rq=%llx:%lld, guilty? %s\n",
rq->engine->name,
@@ -144,48 +136,6 @@ void i915_reset_request(struct i915_request *rq, bool guilty)
}
}
-static void gen3_stop_engine(struct intel_engine_cs *engine)
-{
- struct intel_uncore *uncore = engine->uncore;
- const u32 base = engine->mmio_base;
-
- GEM_TRACE("%s\n", engine->name);
-
- if (intel_engine_stop_cs(engine))
- GEM_TRACE("%s: timed out on STOP_RING\n", engine->name);
-
- intel_uncore_write_fw(uncore,
- RING_HEAD(base),
- intel_uncore_read_fw(uncore, RING_TAIL(base)));
- intel_uncore_posting_read_fw(uncore, RING_HEAD(base)); /* paranoia */
-
- intel_uncore_write_fw(uncore, RING_HEAD(base), 0);
- intel_uncore_write_fw(uncore, RING_TAIL(base), 0);
- intel_uncore_posting_read_fw(uncore, RING_TAIL(base));
-
- /* The ring must be empty before it is disabled */
- intel_uncore_write_fw(uncore, RING_CTL(base), 0);
-
- /* Check acts as a post */
- if (intel_uncore_read_fw(uncore, RING_HEAD(base)))
- GEM_TRACE("%s: ring head [%x] not parked\n",
- engine->name,
- intel_uncore_read_fw(uncore, RING_HEAD(base)));
-}
-
-static void i915_stop_engines(struct drm_i915_private *i915,
- intel_engine_mask_t engine_mask)
-{
- struct intel_engine_cs *engine;
- intel_engine_mask_t tmp;
-
- if (INTEL_GEN(i915) < 3)
- return;
-
- for_each_engine_masked(engine, i915, engine_mask, tmp)
- gen3_stop_engine(engine);
-}
-
static bool i915_in_reset(struct pci_dev *pdev)
{
u8 gdrst;
@@ -194,11 +144,11 @@ static bool i915_in_reset(struct pci_dev *pdev)
return gdrst & GRDOM_RESET_STATUS;
}
-static int i915_do_reset(struct drm_i915_private *i915,
+static int i915_do_reset(struct intel_gt *gt,
intel_engine_mask_t engine_mask,
unsigned int retry)
{
- struct pci_dev *pdev = i915->drm.pdev;
+ struct pci_dev *pdev = gt->i915->drm.pdev;
int err;
/* Assert reset for at least 20 usec, and wait for acknowledgement. */
@@ -223,22 +173,22 @@ static bool g4x_reset_complete(struct pci_dev *pdev)
return (gdrst & GRDOM_RESET_ENABLE) == 0;
}
-static int g33_do_reset(struct drm_i915_private *i915,
+static int g33_do_reset(struct intel_gt *gt,
intel_engine_mask_t engine_mask,
unsigned int retry)
{
- struct pci_dev *pdev = i915->drm.pdev;
+ struct pci_dev *pdev = gt->i915->drm.pdev;
pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE);
return wait_for_atomic(g4x_reset_complete(pdev), 50);
}
-static int g4x_do_reset(struct drm_i915_private *i915,
+static int g4x_do_reset(struct intel_gt *gt,
intel_engine_mask_t engine_mask,
unsigned int retry)
{
- struct pci_dev *pdev = i915->drm.pdev;
- struct intel_uncore *uncore = &i915->uncore;
+ struct pci_dev *pdev = gt->i915->drm.pdev;
+ struct intel_uncore *uncore = gt->uncore;
int ret;
/* WaVcpClkGateDisableForMediaReset:ctg,elk */
@@ -270,11 +220,11 @@ out:
return ret;
}
-static int ironlake_do_reset(struct drm_i915_private *i915,
+static int ironlake_do_reset(struct intel_gt *gt,
intel_engine_mask_t engine_mask,
unsigned int retry)
{
- struct intel_uncore *uncore = &i915->uncore;
+ struct intel_uncore *uncore = gt->uncore;
int ret;
intel_uncore_write_fw(uncore, ILK_GDSR,
@@ -306,10 +256,9 @@ out:
}
/* Reset the hardware domains (GENX_GRDOM_*) specified by mask */
-static int gen6_hw_domain_reset(struct drm_i915_private *i915,
- u32 hw_domain_mask)
+static int gen6_hw_domain_reset(struct intel_gt *gt, u32 hw_domain_mask)
{
- struct intel_uncore *uncore = &i915->uncore;
+ struct intel_uncore *uncore = gt->uncore;
int err;
/*
@@ -331,7 +280,7 @@ static int gen6_hw_domain_reset(struct drm_i915_private *i915,
return err;
}
-static int gen6_reset_engines(struct drm_i915_private *i915,
+static int gen6_reset_engines(struct intel_gt *gt,
intel_engine_mask_t engine_mask,
unsigned int retry)
{
@@ -351,13 +300,13 @@ static int gen6_reset_engines(struct drm_i915_private *i915,
intel_engine_mask_t tmp;
hw_mask = 0;
- for_each_engine_masked(engine, i915, engine_mask, tmp) {
+ for_each_engine_masked(engine, gt->i915, engine_mask, tmp) {
GEM_BUG_ON(engine->id >= ARRAY_SIZE(hw_engine_mask));
hw_mask |= hw_engine_mask[engine->id];
}
}
- return gen6_hw_domain_reset(i915, hw_mask);
+ return gen6_hw_domain_reset(gt, hw_mask);
}
static u32 gen11_lock_sfc(struct intel_engine_cs *engine)
@@ -455,7 +404,7 @@ static void gen11_unlock_sfc(struct intel_engine_cs *engine)
rmw_clear_fw(uncore, sfc_forced_lock, sfc_forced_lock_bit);
}
-static int gen11_reset_engines(struct drm_i915_private *i915,
+static int gen11_reset_engines(struct intel_gt *gt,
intel_engine_mask_t engine_mask,
unsigned int retry)
{
@@ -478,17 +427,17 @@ static int gen11_reset_engines(struct drm_i915_private *i915,
hw_mask = GEN11_GRDOM_FULL;
} else {
hw_mask = 0;
- for_each_engine_masked(engine, i915, engine_mask, tmp) {
+ for_each_engine_masked(engine, gt->i915, engine_mask, tmp) {
GEM_BUG_ON(engine->id >= ARRAY_SIZE(hw_engine_mask));
hw_mask |= hw_engine_mask[engine->id];
hw_mask |= gen11_lock_sfc(engine);
}
}
- ret = gen6_hw_domain_reset(i915, hw_mask);
+ ret = gen6_hw_domain_reset(gt, hw_mask);
if (engine_mask != ALL_ENGINES)
- for_each_engine_masked(engine, i915, engine_mask, tmp)
+ for_each_engine_masked(engine, gt->i915, engine_mask, tmp)
gen11_unlock_sfc(engine);
return ret;
@@ -538,7 +487,7 @@ static void gen8_engine_reset_cancel(struct intel_engine_cs *engine)
_MASKED_BIT_DISABLE(RESET_CTL_REQUEST_RESET));
}
-static int gen8_reset_engines(struct drm_i915_private *i915,
+static int gen8_reset_engines(struct intel_gt *gt,
intel_engine_mask_t engine_mask,
unsigned int retry)
{
@@ -547,7 +496,7 @@ static int gen8_reset_engines(struct drm_i915_private *i915,
intel_engine_mask_t tmp;
int ret;
- for_each_engine_masked(engine, i915, engine_mask, tmp) {
+ for_each_engine_masked(engine, gt->i915, engine_mask, tmp) {
ret = gen8_engine_reset_prepare(engine);
if (ret && !reset_non_ready)
goto skip_reset;
@@ -563,23 +512,23 @@ static int gen8_reset_engines(struct drm_i915_private *i915,
* We rather take context corruption instead of
* failed reset with a wedged driver/gpu. And
* active bb execution case should be covered by
- * i915_stop_engines we have before the reset.
+ * stop_engines() we have before the reset.
*/
}
- if (INTEL_GEN(i915) >= 11)
- ret = gen11_reset_engines(i915, engine_mask, retry);
+ if (INTEL_GEN(gt->i915) >= 11)
+ ret = gen11_reset_engines(gt, engine_mask, retry);
else
- ret = gen6_reset_engines(i915, engine_mask, retry);
+ ret = gen6_reset_engines(gt, engine_mask, retry);
skip_reset:
- for_each_engine_masked(engine, i915, engine_mask, tmp)
+ for_each_engine_masked(engine, gt->i915, engine_mask, tmp)
gen8_engine_reset_cancel(engine);
return ret;
}
-typedef int (*reset_func)(struct drm_i915_private *,
+typedef int (*reset_func)(struct intel_gt *,
intel_engine_mask_t engine_mask,
unsigned int retry);
@@ -601,15 +550,14 @@ static reset_func intel_get_gpu_reset(struct drm_i915_private *i915)
return NULL;
}
-int intel_gpu_reset(struct drm_i915_private *i915,
- intel_engine_mask_t engine_mask)
+int __intel_gt_reset(struct intel_gt *gt, intel_engine_mask_t engine_mask)
{
const int retries = engine_mask == ALL_ENGINES ? RESET_MAX_RETRIES : 1;
reset_func reset;
int ret = -ETIMEDOUT;
int retry;
- reset = intel_get_gpu_reset(i915);
+ reset = intel_get_gpu_reset(gt->i915);
if (!reset)
return -ENODEV;
@@ -617,31 +565,14 @@ int intel_gpu_reset(struct drm_i915_private *i915,
* If the power well sleeps during the reset, the reset
* request may be dropped and never completes (causing -EIO).
*/
- intel_uncore_forcewake_get(&i915->uncore, FORCEWAKE_ALL);
+ intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL);
for (retry = 0; ret == -ETIMEDOUT && retry < retries; retry++) {
- /*
- * We stop engines, otherwise we might get failed reset and a
- * dead gpu (on elk). Also as modern gpu as kbl can suffer
- * from system hang if batchbuffer is progressing when
- * the reset is issued, regardless of READY_TO_RESET ack.
- * Thus assume it is best to stop engines on all gens
- * where we have a gpu reset.
- *
- * WaKBLVECSSemaphoreWaitPoll:kbl (on ALL_ENGINES)
- *
- * WaMediaResetMainRingCleanup:ctg,elk (presumably)
- *
- * FIXME: Wa for more modern gens needs to be validated
- */
- if (retry)
- i915_stop_engines(i915, engine_mask);
-
GEM_TRACE("engine_mask=%x\n", engine_mask);
preempt_disable();
- ret = reset(i915, engine_mask, retry);
+ ret = reset(gt, engine_mask, retry);
preempt_enable();
}
- intel_uncore_forcewake_put(&i915->uncore, FORCEWAKE_ALL);
+ intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL);
return ret;
}
@@ -659,17 +590,17 @@ bool intel_has_reset_engine(struct drm_i915_private *i915)
return INTEL_INFO(i915)->has_reset_engine && i915_modparams.reset >= 2;
}
-int intel_reset_guc(struct drm_i915_private *i915)
+int intel_reset_guc(struct intel_gt *gt)
{
u32 guc_domain =
- INTEL_GEN(i915) >= 11 ? GEN11_GRDOM_GUC : GEN9_GRDOM_GUC;
+ INTEL_GEN(gt->i915) >= 11 ? GEN11_GRDOM_GUC : GEN9_GRDOM_GUC;
int ret;
- GEM_BUG_ON(!HAS_GUC(i915));
+ GEM_BUG_ON(!HAS_GT_UC(gt->i915));
- intel_uncore_forcewake_get(&i915->uncore, FORCEWAKE_ALL);
- ret = gen6_hw_domain_reset(i915, guc_domain);
- intel_uncore_forcewake_put(&i915->uncore, FORCEWAKE_ALL);
+ intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL);
+ ret = gen6_hw_domain_reset(gt, guc_domain);
+ intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL);
return ret;
}
@@ -691,56 +622,55 @@ static void reset_prepare_engine(struct intel_engine_cs *engine)
engine->reset.prepare(engine);
}
-static void revoke_mmaps(struct drm_i915_private *i915)
+static void revoke_mmaps(struct intel_gt *gt)
{
int i;
- for (i = 0; i < i915->ggtt.num_fences; i++) {
+ for (i = 0; i < gt->ggtt->num_fences; i++) {
struct drm_vma_offset_node *node;
struct i915_vma *vma;
u64 vma_offset;
- vma = READ_ONCE(i915->ggtt.fence_regs[i].vma);
+ vma = READ_ONCE(gt->ggtt->fence_regs[i].vma);
if (!vma)
continue;
if (!i915_vma_has_userfault(vma))
continue;
- GEM_BUG_ON(vma->fence != &i915->ggtt.fence_regs[i]);
+ GEM_BUG_ON(vma->fence != &gt->ggtt->fence_regs[i]);
node = &vma->obj->base.vma_node;
vma_offset = vma->ggtt_view.partial.offset << PAGE_SHIFT;
- unmap_mapping_range(i915->drm.anon_inode->i_mapping,
+ unmap_mapping_range(gt->i915->drm.anon_inode->i_mapping,
drm_vma_node_offset_addr(node) + vma_offset,
vma->size,
1);
}
}
-static intel_engine_mask_t reset_prepare(struct drm_i915_private *i915)
+static intel_engine_mask_t reset_prepare(struct intel_gt *gt)
{
struct intel_engine_cs *engine;
intel_engine_mask_t awake = 0;
enum intel_engine_id id;
- for_each_engine(engine, i915, id) {
+ for_each_engine(engine, gt->i915, id) {
if (intel_engine_pm_get_if_awake(engine))
awake |= engine->mask;
reset_prepare_engine(engine);
}
- intel_uc_reset_prepare(i915);
+ intel_uc_reset_prepare(&gt->uc);
return awake;
}
-static void gt_revoke(struct drm_i915_private *i915)
+static void gt_revoke(struct intel_gt *gt)
{
- revoke_mmaps(i915);
+ revoke_mmaps(gt);
}
-static int gt_reset(struct drm_i915_private *i915,
- intel_engine_mask_t stalled_mask)
+static int gt_reset(struct intel_gt *gt, intel_engine_mask_t stalled_mask)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
@@ -750,14 +680,14 @@ static int gt_reset(struct drm_i915_private *i915,
* Everything depends on having the GTT running, so we need to start
* there.
*/
- err = i915_ggtt_enable_hw(i915);
+ err = i915_ggtt_enable_hw(gt->i915);
if (err)
return err;
- for_each_engine(engine, i915, id)
- intel_engine_reset(engine, stalled_mask & engine->mask);
+ for_each_engine(engine, gt->i915, id)
+ __intel_engine_reset(engine, stalled_mask & engine->mask);
- i915_gem_restore_fences(i915);
+ i915_gem_restore_fences(gt->i915);
return err;
}
@@ -770,13 +700,12 @@ static void reset_finish_engine(struct intel_engine_cs *engine)
intel_engine_signal_breadcrumbs(engine);
}
-static void reset_finish(struct drm_i915_private *i915,
- intel_engine_mask_t awake)
+static void reset_finish(struct intel_gt *gt, intel_engine_mask_t awake)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
- for_each_engine(engine, i915, id) {
+ for_each_engine(engine, gt->i915, id) {
reset_finish_engine(engine);
if (awake & engine->mask)
intel_engine_pm_put(engine);
@@ -800,20 +729,19 @@ static void nop_submit_request(struct i915_request *request)
intel_engine_queue_breadcrumbs(engine);
}
-static void __i915_gem_set_wedged(struct drm_i915_private *i915)
+static void __intel_gt_set_wedged(struct intel_gt *gt)
{
- struct i915_gpu_error *error = &i915->gpu_error;
struct intel_engine_cs *engine;
intel_engine_mask_t awake;
enum intel_engine_id id;
- if (test_bit(I915_WEDGED, &error->flags))
+ if (test_bit(I915_WEDGED, &gt->reset.flags))
return;
- if (GEM_SHOW_DEBUG() && !intel_engines_are_idle(i915)) {
+ if (GEM_SHOW_DEBUG() && !intel_engines_are_idle(gt)) {
struct drm_printer p = drm_debug_printer(__func__);
- for_each_engine(engine, i915, id)
+ for_each_engine(engine, gt->i915, id)
intel_engine_dump(engine, &p, "%s\n", engine->name);
}
@@ -824,17 +752,14 @@ static void __i915_gem_set_wedged(struct drm_i915_private *i915)
* rolling the global seqno forward (since this would complete requests
* for which we haven't set the fence error to EIO yet).
*/
- awake = reset_prepare(i915);
+ awake = reset_prepare(gt);
/* Even if the GPU reset fails, it should still stop the engines */
- if (!INTEL_INFO(i915)->gpu_reset_clobbers_display)
- intel_gpu_reset(i915, ALL_ENGINES);
+ if (!INTEL_INFO(gt->i915)->gpu_reset_clobbers_display)
+ __intel_gt_reset(gt, ALL_ENGINES);
- for_each_engine(engine, i915, id) {
+ for_each_engine(engine, gt->i915, id)
engine->submit_request = nop_submit_request;
- engine->schedule = NULL;
- }
- i915->caps.scheduler = 0;
/*
* Make sure no request can slip through without getting completed by
@@ -842,37 +767,37 @@ static void __i915_gem_set_wedged(struct drm_i915_private *i915)
* in nop_submit_request.
*/
synchronize_rcu_expedited();
- set_bit(I915_WEDGED, &error->flags);
+ set_bit(I915_WEDGED, &gt->reset.flags);
/* Mark all executing requests as skipped */
- for_each_engine(engine, i915, id)
+ for_each_engine(engine, gt->i915, id)
engine->cancel_requests(engine);
- reset_finish(i915, awake);
+ reset_finish(gt, awake);
GEM_TRACE("end\n");
}
-void i915_gem_set_wedged(struct drm_i915_private *i915)
+void intel_gt_set_wedged(struct intel_gt *gt)
{
- struct i915_gpu_error *error = &i915->gpu_error;
intel_wakeref_t wakeref;
- mutex_lock(&error->wedge_mutex);
- with_intel_runtime_pm(&i915->runtime_pm, wakeref)
- __i915_gem_set_wedged(i915);
- mutex_unlock(&error->wedge_mutex);
+ mutex_lock(&gt->reset.mutex);
+ with_intel_runtime_pm(&gt->i915->runtime_pm, wakeref)
+ __intel_gt_set_wedged(gt);
+ mutex_unlock(&gt->reset.mutex);
}
-static bool __i915_gem_unset_wedged(struct drm_i915_private *i915)
+static bool __intel_gt_unset_wedged(struct intel_gt *gt)
{
- struct i915_gpu_error *error = &i915->gpu_error;
- struct i915_timeline *tl;
+ struct intel_gt_timelines *timelines = &gt->timelines;
+ struct intel_timeline *tl;
+ unsigned long flags;
- if (!test_bit(I915_WEDGED, &error->flags))
+ if (!test_bit(I915_WEDGED, &gt->reset.flags))
return true;
- if (!i915->gt.scratch) /* Never full initialised, recovery impossible */
+ if (!gt->scratch) /* Never full initialised, recovery impossible */
return false;
GEM_TRACE("start\n");
@@ -887,14 +812,16 @@ static bool __i915_gem_unset_wedged(struct drm_i915_private *i915)
*
* No more can be submitted until we reset the wedged bit.
*/
- mutex_lock(&i915->gt.timelines.mutex);
- list_for_each_entry(tl, &i915->gt.timelines.active_list, link) {
+ spin_lock_irqsave(&timelines->lock, flags);
+ list_for_each_entry(tl, &timelines->active_list, link) {
struct i915_request *rq;
rq = i915_active_request_get_unlocked(&tl->last_request);
if (!rq)
continue;
+ spin_unlock_irqrestore(&timelines->lock, flags);
+
/*
* All internal dependencies (i915_requests) will have
* been flushed by the set-wedge, but we may be stuck waiting
@@ -904,10 +831,14 @@ static bool __i915_gem_unset_wedged(struct drm_i915_private *i915)
*/
dma_fence_default_wait(&rq->fence, false, MAX_SCHEDULE_TIMEOUT);
i915_request_put(rq);
+
+ /* Restart iteration after dropping the lock */
+ spin_lock_irqsave(&timelines->lock, flags);
+ tl = list_entry(&timelines->active_list, typeof(*tl), link);
}
- mutex_unlock(&i915->gt.timelines.mutex);
+ spin_unlock_irqrestore(&timelines->lock, flags);
- intel_gt_sanitize(i915, false);
+ intel_gt_sanitize(gt, false);
/*
* Undo nop_submit_request. We prevent all new i915 requests from
@@ -918,53 +849,51 @@ static bool __i915_gem_unset_wedged(struct drm_i915_private *i915)
* the nop_submit_request on reset, we can do this from normal
* context and do not require stop_machine().
*/
- intel_engines_reset_default_submission(i915);
+ intel_engines_reset_default_submission(gt);
GEM_TRACE("end\n");
smp_mb__before_atomic(); /* complete takeover before enabling execbuf */
- clear_bit(I915_WEDGED, &i915->gpu_error.flags);
+ clear_bit(I915_WEDGED, &gt->reset.flags);
return true;
}
-bool i915_gem_unset_wedged(struct drm_i915_private *i915)
+bool intel_gt_unset_wedged(struct intel_gt *gt)
{
- struct i915_gpu_error *error = &i915->gpu_error;
bool result;
- mutex_lock(&error->wedge_mutex);
- result = __i915_gem_unset_wedged(i915);
- mutex_unlock(&error->wedge_mutex);
+ mutex_lock(&gt->reset.mutex);
+ result = __intel_gt_unset_wedged(gt);
+ mutex_unlock(&gt->reset.mutex);
return result;
}
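A brief sketch of how these entry points pair up in practice (assumed usage, not shown in this diff): a fatal error declares the GPU lost, and resume attempts to lift the wedge before accepting new work:

	if (fatal_error)
		intel_gt_set_wedged(gt);

	/* ... later, on resume ... */
	if (!intel_gt_unset_wedged(gt))
		return -EIO;	/* recovery impossible, stay wedged */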
-static int do_reset(struct drm_i915_private *i915,
- intel_engine_mask_t stalled_mask)
+static int do_reset(struct intel_gt *gt, intel_engine_mask_t stalled_mask)
{
int err, i;
- gt_revoke(i915);
+ gt_revoke(gt);
- err = intel_gpu_reset(i915, ALL_ENGINES);
+ err = __intel_gt_reset(gt, ALL_ENGINES);
for (i = 0; err && i < RESET_MAX_RETRIES; i++) {
msleep(10 * (i + 1));
- err = intel_gpu_reset(i915, ALL_ENGINES);
+ err = __intel_gt_reset(gt, ALL_ENGINES);
}
if (err)
return err;
- return gt_reset(i915, stalled_mask);
+ return gt_reset(gt, stalled_mask);
}
-static int resume(struct drm_i915_private *i915)
+static int resume(struct intel_gt *gt)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
int ret;
- for_each_engine(engine, i915, id) {
+ for_each_engine(engine, gt->i915, id) {
ret = engine->resume(engine);
if (ret)
return ret;
@@ -974,8 +903,8 @@ static int resume(struct drm_i915_private *i915)
}
/**
- * i915_reset - reset chip after a hang
- * @i915: #drm_i915_private to reset
+ * intel_gt_reset - reset chip after a hang
+ * @gt: #intel_gt to reset
* @stalled_mask: mask of the stalled engines with the guilty requests
* @reason: user error message for why we are resetting
*
@@ -990,50 +919,50 @@ static int resume(struct drm_i915_private *i915)
* - re-init interrupt state
* - re-init display
*/
-void i915_reset(struct drm_i915_private *i915,
- intel_engine_mask_t stalled_mask,
- const char *reason)
+void intel_gt_reset(struct intel_gt *gt,
+ intel_engine_mask_t stalled_mask,
+ const char *reason)
{
- struct i915_gpu_error *error = &i915->gpu_error;
intel_engine_mask_t awake;
int ret;
- GEM_TRACE("flags=%lx\n", error->flags);
+ GEM_TRACE("flags=%lx\n", gt->reset.flags);
might_sleep();
- GEM_BUG_ON(!test_bit(I915_RESET_BACKOFF, &error->flags));
- mutex_lock(&error->wedge_mutex);
+ GEM_BUG_ON(!test_bit(I915_RESET_BACKOFF, &gt->reset.flags));
+ mutex_lock(&gt->reset.mutex);
/* Clear any previous failed attempts at recovery. Time to try again. */
- if (!__i915_gem_unset_wedged(i915))
+ if (!__intel_gt_unset_wedged(gt))
goto unlock;
if (reason)
- dev_notice(i915->drm.dev, "Resetting chip for %s\n", reason);
- error->reset_count++;
+ dev_notice(gt->i915->drm.dev,
+ "Resetting chip for %s\n", reason);
+ atomic_inc(&gt->i915->gpu_error.reset_count);
- awake = reset_prepare(i915);
+ awake = reset_prepare(gt);
- if (!intel_has_gpu_reset(i915)) {
+ if (!intel_has_gpu_reset(gt->i915)) {
if (i915_modparams.reset)
- dev_err(i915->drm.dev, "GPU reset not supported\n");
+ dev_err(gt->i915->drm.dev, "GPU reset not supported\n");
else
DRM_DEBUG_DRIVER("GPU reset disabled\n");
goto error;
}
- if (INTEL_INFO(i915)->gpu_reset_clobbers_display)
- intel_runtime_pm_disable_interrupts(i915);
+ if (INTEL_INFO(gt->i915)->gpu_reset_clobbers_display)
+ intel_runtime_pm_disable_interrupts(gt->i915);
- if (do_reset(i915, stalled_mask)) {
- dev_err(i915->drm.dev, "Failed to reset chip\n");
+ if (do_reset(gt, stalled_mask)) {
+ dev_err(gt->i915->drm.dev, "Failed to reset chip\n");
goto taint;
}
- if (INTEL_INFO(i915)->gpu_reset_clobbers_display)
- intel_runtime_pm_enable_interrupts(i915);
+ if (INTEL_INFO(gt->i915)->gpu_reset_clobbers_display)
+ intel_runtime_pm_enable_interrupts(gt->i915);
- intel_overlay_reset(i915);
+ intel_overlay_reset(gt->i915);
/*
* Next we need to restore the context, but we don't use those
@@ -1043,23 +972,23 @@ void i915_reset(struct drm_i915_private *i915,
* was running at the time of the reset (i.e. we weren't VT
* switched away).
*/
- ret = i915_gem_init_hw(i915);
+ ret = i915_gem_init_hw(gt->i915);
if (ret) {
DRM_ERROR("Failed to initialise HW following reset (%d)\n",
ret);
goto taint;
}
- ret = resume(i915);
+ ret = resume(gt);
if (ret)
goto taint;
- i915_queue_hangcheck(i915);
+ intel_gt_queue_hangcheck(gt);
finish:
- reset_finish(i915, awake);
+ reset_finish(gt, awake);
unlock:
- mutex_unlock(&error->wedge_mutex);
+ mutex_unlock(&gt->reset.mutex);
return;
taint:
@@ -1077,18 +1006,17 @@ taint:
*/
add_taint_for_CI(TAINT_WARN);
error:
- __i915_gem_set_wedged(i915);
+ __intel_gt_set_wedged(gt);
goto finish;
}
-static inline int intel_gt_reset_engine(struct drm_i915_private *i915,
- struct intel_engine_cs *engine)
+static inline int intel_gt_reset_engine(struct intel_engine_cs *engine)
{
- return intel_gpu_reset(i915, engine->mask);
+ return __intel_gt_reset(engine->gt, engine->mask);
}
/**
- * i915_reset_engine - reset GPU engine to recover from a hang
+ * intel_engine_reset - reset GPU engine to recover from a hang
* @engine: engine to reset
* @msg: reason for GPU reset; or NULL for no dev_notice()
*
@@ -1100,13 +1028,13 @@ static inline int intel_gt_reset_engine(struct drm_i915_private *i915,
* - reset engine (which will force the engine to idle)
* - re-init/configure engine
*/
-int i915_reset_engine(struct intel_engine_cs *engine, const char *msg)
+int intel_engine_reset(struct intel_engine_cs *engine, const char *msg)
{
- struct i915_gpu_error *error = &engine->i915->gpu_error;
+ struct intel_gt *gt = engine->gt;
int ret;
- GEM_TRACE("%s flags=%lx\n", engine->name, error->flags);
- GEM_BUG_ON(!test_bit(I915_RESET_ENGINE + engine->id, &error->flags));
+ GEM_TRACE("%s flags=%lx\n", engine->name, gt->reset.flags);
+ GEM_BUG_ON(!test_bit(I915_RESET_ENGINE + engine->id, &gt->reset.flags));
if (!intel_engine_pm_get_if_awake(engine))
return 0;
@@ -1116,16 +1044,16 @@ int i915_reset_engine(struct intel_engine_cs *engine, const char *msg)
if (msg)
dev_notice(engine->i915->drm.dev,
"Resetting %s for %s\n", engine->name, msg);
- error->reset_engine_count[engine->id]++;
+ atomic_inc(&engine->i915->gpu_error.reset_engine_count[engine->uabi_class]);
- if (!engine->i915->guc.execbuf_client)
- ret = intel_gt_reset_engine(engine->i915, engine);
+ if (!engine->gt->uc.guc.execbuf_client)
+ ret = intel_gt_reset_engine(engine);
else
- ret = intel_guc_reset_engine(&engine->i915->guc, engine);
+ ret = intel_guc_reset_engine(&engine->gt->uc.guc, engine);
if (ret) {
/* If we fail here, we expect to fallback to a global reset */
DRM_DEBUG_DRIVER("%sFailed to reset %s, ret=%d\n",
- engine->i915->guc.execbuf_client ? "GuC " : "",
+ engine->gt->uc.guc.execbuf_client ? "GuC " : "",
engine->name, ret);
goto out;
}
@@ -1135,7 +1063,7 @@ int i915_reset_engine(struct intel_engine_cs *engine, const char *msg)
* active request and can drop it, adjust head to skip the offending
* request to resume executing remaining requests in the queue.
*/
- intel_engine_reset(engine, true);
+ __intel_engine_reset(engine, true);
/*
* The engine and its registers (and workarounds in case of render)
@@ -1151,16 +1079,15 @@ out:
return ret;
}
-static void i915_reset_device(struct drm_i915_private *i915,
- u32 engine_mask,
- const char *reason)
+static void intel_gt_reset_global(struct intel_gt *gt,
+ u32 engine_mask,
+ const char *reason)
{
- struct i915_gpu_error *error = &i915->gpu_error;
- struct kobject *kobj = &i915->drm.primary->kdev->kobj;
+ struct kobject *kobj = &gt->i915->drm.primary->kdev->kobj;
char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
- struct i915_wedge_me w;
+ struct intel_wedge_me w;
kobject_uevent_env(kobj, KOBJ_CHANGE, error_event);
@@ -1168,137 +1095,24 @@ static void i915_reset_device(struct drm_i915_private *i915,
kobject_uevent_env(kobj, KOBJ_CHANGE, reset_event);
/* Use a watchdog to ensure that our reset completes */
- i915_wedge_on_timeout(&w, i915, 5 * HZ) {
- intel_prepare_reset(i915);
+ intel_wedge_on_timeout(&w, gt, 5 * HZ) {
+ intel_prepare_reset(gt->i915);
/* Flush everyone using a resource about to be clobbered */
- synchronize_srcu_expedited(&error->reset_backoff_srcu);
+ synchronize_srcu_expedited(&gt->reset.backoff_srcu);
- i915_reset(i915, engine_mask, reason);
+ intel_gt_reset(gt, engine_mask, reason);
- intel_finish_reset(i915);
+ intel_finish_reset(gt->i915);
}
- if (!test_bit(I915_WEDGED, &error->flags))
+ if (!test_bit(I915_WEDGED, &gt->reset.flags))
kobject_uevent_env(kobj, KOBJ_CHANGE, reset_done_event);
}
-static void clear_register(struct intel_uncore *uncore, i915_reg_t reg)
-{
- intel_uncore_rmw(uncore, reg, 0, 0);
-}
-
-static void gen8_clear_engine_error_register(struct intel_engine_cs *engine)
-{
- GEN6_RING_FAULT_REG_RMW(engine, RING_FAULT_VALID, 0);
- GEN6_RING_FAULT_REG_POSTING_READ(engine);
-}
-
-static void clear_error_registers(struct drm_i915_private *i915,
- intel_engine_mask_t engine_mask)
-{
- struct intel_uncore *uncore = &i915->uncore;
- u32 eir;
-
- if (!IS_GEN(i915, 2))
- clear_register(uncore, PGTBL_ER);
-
- if (INTEL_GEN(i915) < 4)
- clear_register(uncore, IPEIR(RENDER_RING_BASE));
- else
- clear_register(uncore, IPEIR_I965);
-
- clear_register(uncore, EIR);
- eir = intel_uncore_read(uncore, EIR);
- if (eir) {
- /*
- * some errors might have become stuck,
- * mask them.
- */
- DRM_DEBUG_DRIVER("EIR stuck: 0x%08x, masking\n", eir);
- rmw_set(uncore, EMR, eir);
- intel_uncore_write(uncore, GEN2_IIR,
- I915_MASTER_ERROR_INTERRUPT);
- }
-
- if (INTEL_GEN(i915) >= 8) {
- rmw_clear(uncore, GEN8_RING_FAULT_REG, RING_FAULT_VALID);
- intel_uncore_posting_read(uncore, GEN8_RING_FAULT_REG);
- } else if (INTEL_GEN(i915) >= 6) {
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
-
- for_each_engine_masked(engine, i915, engine_mask, id)
- gen8_clear_engine_error_register(engine);
- }
-}
-
-static void gen6_check_faults(struct drm_i915_private *dev_priv)
-{
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
- u32 fault;
-
- for_each_engine(engine, dev_priv, id) {
- fault = GEN6_RING_FAULT_REG_READ(engine);
- if (fault & RING_FAULT_VALID) {
- DRM_DEBUG_DRIVER("Unexpected fault\n"
- "\tAddr: 0x%08lx\n"
- "\tAddress space: %s\n"
- "\tSource ID: %d\n"
- "\tType: %d\n",
- fault & PAGE_MASK,
- fault & RING_FAULT_GTTSEL_MASK ? "GGTT" : "PPGTT",
- RING_FAULT_SRCID(fault),
- RING_FAULT_FAULT_TYPE(fault));
- }
- }
-}
-
-static void gen8_check_faults(struct drm_i915_private *dev_priv)
-{
- u32 fault = I915_READ(GEN8_RING_FAULT_REG);
-
- if (fault & RING_FAULT_VALID) {
- u32 fault_data0, fault_data1;
- u64 fault_addr;
-
- fault_data0 = I915_READ(GEN8_FAULT_TLB_DATA0);
- fault_data1 = I915_READ(GEN8_FAULT_TLB_DATA1);
- fault_addr = ((u64)(fault_data1 & FAULT_VA_HIGH_BITS) << 44) |
- ((u64)fault_data0 << 12);
-
- DRM_DEBUG_DRIVER("Unexpected fault\n"
- "\tAddr: 0x%08x_%08x\n"
- "\tAddress space: %s\n"
- "\tEngine ID: %d\n"
- "\tSource ID: %d\n"
- "\tType: %d\n",
- upper_32_bits(fault_addr),
- lower_32_bits(fault_addr),
- fault_data1 & FAULT_GTT_SEL ? "GGTT" : "PPGTT",
- GEN8_RING_FAULT_ENGINE_ID(fault),
- RING_FAULT_SRCID(fault),
- RING_FAULT_FAULT_TYPE(fault));
- }
-}
-
-void i915_check_and_clear_faults(struct drm_i915_private *i915)
-{
- /* From GEN8 onwards we only have one 'All Engine Fault Register' */
- if (INTEL_GEN(i915) >= 8)
- gen8_check_faults(i915);
- else if (INTEL_GEN(i915) >= 6)
- gen6_check_faults(i915);
- else
- return;
-
- clear_error_registers(i915, ALL_ENGINES);
-}
-
/**
- * i915_handle_error - handle a gpu error
- * @i915: i915 device private
+ * intel_gt_handle_error - handle a gpu error
+ * @gt: the intel_gt
* @engine_mask: mask representing engines that are hung
* @flags: control flags
* @fmt: Error message format string
@@ -1309,12 +1123,11 @@ void i915_check_and_clear_faults(struct drm_i915_private *i915)
* so userspace knows something bad happened (should trigger collection
* of a ring dump etc.).
*/
-void i915_handle_error(struct drm_i915_private *i915,
- intel_engine_mask_t engine_mask,
- unsigned long flags,
- const char *fmt, ...)
+void intel_gt_handle_error(struct intel_gt *gt,
+ intel_engine_mask_t engine_mask,
+ unsigned long flags,
+ const char *fmt, ...)
{
- struct i915_gpu_error *error = &i915->gpu_error;
struct intel_engine_cs *engine;
intel_wakeref_t wakeref;
intel_engine_mask_t tmp;
@@ -1338,33 +1151,31 @@ void i915_handle_error(struct drm_i915_private *i915,
* isn't the case at least when we get here by doing a
* simulated reset via debugfs, so get an RPM reference.
*/
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
+ wakeref = intel_runtime_pm_get(&gt->i915->runtime_pm);
- engine_mask &= INTEL_INFO(i915)->engine_mask;
+ engine_mask &= INTEL_INFO(gt->i915)->engine_mask;
if (flags & I915_ERROR_CAPTURE) {
- i915_capture_error_state(i915, engine_mask, msg);
- clear_error_registers(i915, engine_mask);
+ i915_capture_error_state(gt->i915, engine_mask, msg);
+ intel_gt_clear_error_registers(gt, engine_mask);
}
/*
* Try engine reset when available. We fall back to full reset if
* single reset fails.
*/
- if (intel_has_reset_engine(i915) && !__i915_wedged(error)) {
- for_each_engine_masked(engine, i915, engine_mask, tmp) {
+ if (intel_has_reset_engine(gt->i915) && !intel_gt_is_wedged(gt)) {
+ for_each_engine_masked(engine, gt->i915, engine_mask, tmp) {
BUILD_BUG_ON(I915_RESET_MODESET >= I915_RESET_ENGINE);
if (test_and_set_bit(I915_RESET_ENGINE + engine->id,
- &error->flags))
+ &gt->reset.flags))
continue;
- if (i915_reset_engine(engine, msg) == 0)
+ if (intel_engine_reset(engine, msg) == 0)
engine_mask &= ~engine->mask;
- clear_bit(I915_RESET_ENGINE + engine->id,
- &error->flags);
- wake_up_bit(&error->flags,
- I915_RESET_ENGINE + engine->id);
+ clear_and_wake_up_bit(I915_RESET_ENGINE + engine->id,
+ &gt->reset.flags);
}
}
@@ -1372,9 +1183,9 @@ void i915_handle_error(struct drm_i915_private *i915,
goto out;
/* Full reset needs the mutex, stop any other user trying to do so. */
- if (test_and_set_bit(I915_RESET_BACKOFF, &error->flags)) {
- wait_event(error->reset_queue,
- !test_bit(I915_RESET_BACKOFF, &error->flags));
+ if (test_and_set_bit(I915_RESET_BACKOFF, &gt->reset.flags)) {
+ wait_event(gt->reset.queue,
+ !test_bit(I915_RESET_BACKOFF, &gt->reset.flags));
goto out; /* piggy-back on the other reset */
}
@@ -1382,113 +1193,119 @@ void i915_handle_error(struct drm_i915_private *i915,
synchronize_rcu_expedited();
/* Prevent any other reset-engine attempt. */
- for_each_engine(engine, i915, tmp) {
+ for_each_engine(engine, gt->i915, tmp) {
while (test_and_set_bit(I915_RESET_ENGINE + engine->id,
- &error->flags))
- wait_on_bit(&error->flags,
+ &gt->reset.flags))
+ wait_on_bit(&gt->reset.flags,
I915_RESET_ENGINE + engine->id,
TASK_UNINTERRUPTIBLE);
}
- i915_reset_device(i915, engine_mask, msg);
+ intel_gt_reset_global(gt, engine_mask, msg);
- for_each_engine(engine, i915, tmp) {
- clear_bit(I915_RESET_ENGINE + engine->id,
- &error->flags);
- }
-
- clear_bit(I915_RESET_BACKOFF, &error->flags);
- wake_up_all(&error->reset_queue);
+ for_each_engine(engine, gt->i915, tmp)
+ clear_bit_unlock(I915_RESET_ENGINE + engine->id,
+ &gt->reset.flags);
+ clear_bit_unlock(I915_RESET_BACKOFF, &gt->reset.flags);
+ smp_mb__after_atomic();
+ wake_up_all(&gt->reset.queue);
out:
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
+ intel_runtime_pm_put(&gt->i915->runtime_pm, wakeref);
}
-int i915_reset_trylock(struct drm_i915_private *i915)
+int intel_gt_reset_trylock(struct intel_gt *gt)
{
- struct i915_gpu_error *error = &i915->gpu_error;
int srcu;
- might_lock(&error->reset_backoff_srcu);
+ might_lock(&gt->reset.backoff_srcu);
might_sleep();
rcu_read_lock();
- while (test_bit(I915_RESET_BACKOFF, &error->flags)) {
+ while (test_bit(I915_RESET_BACKOFF, &gt->reset.flags)) {
rcu_read_unlock();
- if (wait_event_interruptible(error->reset_queue,
+ if (wait_event_interruptible(gt->reset.queue,
!test_bit(I915_RESET_BACKOFF,
- &error->flags)))
+ &gt->reset.flags)))
return -EINTR;
rcu_read_lock();
}
- srcu = srcu_read_lock(&error->reset_backoff_srcu);
+ srcu = srcu_read_lock(&gt->reset.backoff_srcu);
rcu_read_unlock();
return srcu;
}
-void i915_reset_unlock(struct drm_i915_private *i915, int tag)
-__releases(&i915->gpu_error.reset_backoff_srcu)
+void intel_gt_reset_unlock(struct intel_gt *gt, int tag)
+__releases(&gt->reset.backoff_srcu)
{
- struct i915_gpu_error *error = &i915->gpu_error;
-
- srcu_read_unlock(&error->reset_backoff_srcu, tag);
+ srcu_read_unlock(&gt->reset.backoff_srcu, tag);
}
-int i915_terminally_wedged(struct drm_i915_private *i915)
+int intel_gt_terminally_wedged(struct intel_gt *gt)
{
- struct i915_gpu_error *error = &i915->gpu_error;
-
might_sleep();
- if (!__i915_wedged(error))
+ if (!intel_gt_is_wedged(gt))
return 0;
/* Reset still in progress? Maybe we will recover? */
- if (!test_bit(I915_RESET_BACKOFF, &error->flags))
+ if (!test_bit(I915_RESET_BACKOFF, &gt->reset.flags))
return -EIO;
/* XXX intel_reset_finish() still takes struct_mutex!!! */
- if (mutex_is_locked(&i915->drm.struct_mutex))
+ if (mutex_is_locked(&gt->i915->drm.struct_mutex))
return -EAGAIN;
- if (wait_event_interruptible(error->reset_queue,
+ if (wait_event_interruptible(gt->reset.queue,
!test_bit(I915_RESET_BACKOFF,
- &error->flags)))
+ &gt->reset.flags)))
return -EINTR;
- return __i915_wedged(error) ? -EIO : 0;
+ return intel_gt_is_wedged(gt) ? -EIO : 0;
+}
+
+void intel_gt_init_reset(struct intel_gt *gt)
+{
+ init_waitqueue_head(&gt->reset.queue);
+ mutex_init(&gt->reset.mutex);
+ init_srcu_struct(&gt->reset.backoff_srcu);
+}
+
+void intel_gt_fini_reset(struct intel_gt *gt)
+{
+ cleanup_srcu_struct(&gt->reset.backoff_srcu);
}
-static void i915_wedge_me(struct work_struct *work)
+static void intel_wedge_me(struct work_struct *work)
{
- struct i915_wedge_me *w = container_of(work, typeof(*w), work.work);
+ struct intel_wedge_me *w = container_of(work, typeof(*w), work.work);
- dev_err(w->i915->drm.dev,
+ dev_err(w->gt->i915->drm.dev,
"%s timed out, cancelling all in-flight rendering.\n",
w->name);
- i915_gem_set_wedged(w->i915);
+ intel_gt_set_wedged(w->gt);
}
-void __i915_init_wedge(struct i915_wedge_me *w,
- struct drm_i915_private *i915,
- long timeout,
- const char *name)
+void __intel_init_wedge(struct intel_wedge_me *w,
+ struct intel_gt *gt,
+ long timeout,
+ const char *name)
{
- w->i915 = i915;
+ w->gt = gt;
w->name = name;
- INIT_DELAYED_WORK_ONSTACK(&w->work, i915_wedge_me);
+ INIT_DELAYED_WORK_ONSTACK(&w->work, intel_wedge_me);
schedule_delayed_work(&w->work, timeout);
}
-void __i915_fini_wedge(struct i915_wedge_me *w)
+void __intel_fini_wedge(struct intel_wedge_me *w)
{
cancel_delayed_work_sync(&w->work);
destroy_delayed_work_on_stack(&w->work);
- w->i915 = NULL;
+ w->gt = NULL;
}
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
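The hunks above move hang handling from the device-wide i915_gpu_error into the per-gt intel_gt/intel_reset state. For orientation, a minimal sketch of how a hang detector would report a stuck engine through the new entry point; the wrapper function is illustrative, only intel_gt_handle_error() and I915_ERROR_CAPTURE come from this diff:

/* Sketch only: report one hung engine via the gt-centric API. */
static void report_hung_engine(struct intel_engine_cs *engine)
{
	/*
	 * I915_ERROR_CAPTURE requests an error-state snapshot and a
	 * fault-register clear before the reset. The handler tries a
	 * per-engine reset first and falls back to a global reset.
	 */
	intel_gt_handle_error(engine->gt, engine->mask, I915_ERROR_CAPTURE,
			      "%s hung", engine->name);
}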
diff --git a/drivers/gpu/drm/i915/gt/intel_reset.h b/drivers/gpu/drm/i915/gt/intel_reset.h
index 580ebdb59eca..37a987b17108 100644
--- a/drivers/gpu/drm/i915/gt/intel_reset.h
+++ b/drivers/gpu/drm/i915/gt/intel_reset.h
@@ -11,58 +11,67 @@
#include <linux/types.h>
#include <linux/srcu.h>
-#include "gt/intel_engine_types.h"
+#include "intel_engine_types.h"
+#include "intel_reset_types.h"
struct drm_i915_private;
struct i915_request;
struct intel_engine_cs;
+struct intel_gt;
struct intel_guc;
+void intel_gt_init_reset(struct intel_gt *gt);
+void intel_gt_fini_reset(struct intel_gt *gt);
+
__printf(4, 5)
-void i915_handle_error(struct drm_i915_private *i915,
- intel_engine_mask_t engine_mask,
- unsigned long flags,
- const char *fmt, ...);
+void intel_gt_handle_error(struct intel_gt *gt,
+ intel_engine_mask_t engine_mask,
+ unsigned long flags,
+ const char *fmt, ...);
#define I915_ERROR_CAPTURE BIT(0)
-void i915_check_and_clear_faults(struct drm_i915_private *i915);
-
-void i915_reset(struct drm_i915_private *i915,
- intel_engine_mask_t stalled_mask,
- const char *reason);
-int i915_reset_engine(struct intel_engine_cs *engine,
- const char *reason);
-
-void i915_reset_request(struct i915_request *rq, bool guilty);
+void intel_gt_reset(struct intel_gt *gt,
+ intel_engine_mask_t stalled_mask,
+ const char *reason);
+int intel_engine_reset(struct intel_engine_cs *engine,
+ const char *reason);
-int __must_check i915_reset_trylock(struct drm_i915_private *i915);
-void i915_reset_unlock(struct drm_i915_private *i915, int tag);
+void __i915_request_reset(struct i915_request *rq, bool guilty);
-int i915_terminally_wedged(struct drm_i915_private *i915);
+int __must_check intel_gt_reset_trylock(struct intel_gt *gt);
+void intel_gt_reset_unlock(struct intel_gt *gt, int tag);
-bool intel_has_gpu_reset(struct drm_i915_private *i915);
-bool intel_has_reset_engine(struct drm_i915_private *i915);
+void intel_gt_set_wedged(struct intel_gt *gt);
+bool intel_gt_unset_wedged(struct intel_gt *gt);
+int intel_gt_terminally_wedged(struct intel_gt *gt);
-int intel_gpu_reset(struct drm_i915_private *i915,
- intel_engine_mask_t engine_mask);
+int __intel_gt_reset(struct intel_gt *gt, intel_engine_mask_t engine_mask);
-int intel_reset_guc(struct drm_i915_private *i915);
+int intel_reset_guc(struct intel_gt *gt);
-struct i915_wedge_me {
+struct intel_wedge_me {
struct delayed_work work;
- struct drm_i915_private *i915;
+ struct intel_gt *gt;
const char *name;
};
-void __i915_init_wedge(struct i915_wedge_me *w,
- struct drm_i915_private *i915,
- long timeout,
- const char *name);
-void __i915_fini_wedge(struct i915_wedge_me *w);
+void __intel_init_wedge(struct intel_wedge_me *w,
+ struct intel_gt *gt,
+ long timeout,
+ const char *name);
+void __intel_fini_wedge(struct intel_wedge_me *w);
-#define i915_wedge_on_timeout(W, DEV, TIMEOUT) \
- for (__i915_init_wedge((W), (DEV), (TIMEOUT), __func__); \
- (W)->i915; \
- __i915_fini_wedge((W)))
+#define intel_wedge_on_timeout(W, GT, TIMEOUT) \
+ for (__intel_init_wedge((W), (GT), (TIMEOUT), __func__); \
+ (W)->gt; \
+ __intel_fini_wedge((W)))
+
+static inline bool __intel_reset_failed(const struct intel_reset *reset)
+{
+ return unlikely(test_bit(I915_WEDGED, &reset->flags));
+}
+
+bool intel_has_gpu_reset(struct drm_i915_private *i915);
+bool intel_has_reset_engine(struct drm_i915_private *i915);
#endif /* I915_RESET_H */
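intel_wedge_on_timeout() expands to a for-loop bounded by __intel_init_wedge()/__intel_fini_wedge(): entering the block arms a delayed work that logs __func__ and wedges the GT if the body overruns, and leaving the block cancels the work synchronously. A sketch of the usage pattern, matching the intel_gt_reset_global() hunk earlier in this diff:

/* Sketch: bound a reset sequence with a 5 second wedge watchdog. */
{
	struct intel_wedge_me w;

	intel_wedge_on_timeout(&w, gt, 5 * HZ) {
		/*
		 * If this body overruns, intel_wedge_me() fires and
		 * calls intel_gt_set_wedged(gt); otherwise the delayed
		 * work is cancelled on loop exit.
		 */
		intel_gt_reset(gt, ALL_ENGINES, "example timeout guard");
	}
}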
diff --git a/drivers/gpu/drm/i915/gt/intel_reset_types.h b/drivers/gpu/drm/i915/gt/intel_reset_types.h
new file mode 100644
index 000000000000..31968356e0c0
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_reset_types.h
@@ -0,0 +1,50 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_RESET_TYPES_H_
+#define __INTEL_RESET_TYPES_H_
+
+#include <linux/mutex.h>
+#include <linux/wait.h>
+#include <linux/srcu.h>
+
+struct intel_reset {
+ /**
+ * flags: Control various stages of the GPU reset
+ *
+ * #I915_RESET_BACKOFF - When we start a global reset, we need to
+ * serialise with any other users attempting to do the same, and
+ * any global resources that may be clobbered by the reset (such as
+ * FENCE registers).
+ *
+ * #I915_RESET_ENGINE[num_engines] - Since the driver doesn't need to
+ * acquire the struct_mutex to reset an engine, we need an explicit
+ * flag to prevent two concurrent reset attempts in the same engine.
+ * As the number of engines continues to grow, allocate the flags from
+ * the most significant bits.
+ *
+ * #I915_WEDGED - If reset fails and we can no longer use the GPU,
+ * we set the #I915_WEDGED bit. Prior to command submission, e.g.
+ * i915_request_alloc(), this bit is checked and the sequence
+ * aborted (with -EIO reported to userspace) if set.
+ */
+ unsigned long flags;
+#define I915_RESET_BACKOFF 0
+#define I915_RESET_MODESET 1
+#define I915_RESET_ENGINE 2
+#define I915_WEDGED (BITS_PER_LONG - 1)
+
+ struct mutex mutex; /* serialises wedging/unwedging */
+
+ /**
+ * Waitqueue to signal when the reset has completed. Used by clients
+ * that wait for a pending reset to settle.
+ */
+ wait_queue_head_t queue;
+
+ struct srcu_struct backoff_srcu;
+};
+
+#endif /* __INTEL_RESET_TYPES_H_ */
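The flags word carries the bit-lock idiom over from the old i915_gpu_error; the serialisation pattern the rest of this diff builds on looks like the sketch below, condensed from the intel_gt_handle_error() hunks (assuming a valid gt):

/* Sketch: serialising a global reset on I915_RESET_BACKOFF. */
if (test_and_set_bit(I915_RESET_BACKOFF, &gt->reset.flags)) {
	/* Another thread owns the global reset; piggy-back on it. */
	wait_event(gt->reset.queue,
		   !test_bit(I915_RESET_BACKOFF, &gt->reset.flags));
} else {
	/* ... perform the global reset ... */
	clear_bit_unlock(I915_RESET_BACKOFF, &gt->reset.flags);
	smp_mb__after_atomic();
	wake_up_all(&gt->reset.queue);
}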
diff --git a/drivers/gpu/drm/i915/gt/intel_ringbuffer.c b/drivers/gpu/drm/i915/gt/intel_ringbuffer.c
index 12010e798868..601c16239fdf 100644
--- a/drivers/gpu/drm/i915/gt/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/gt/intel_ringbuffer.c
@@ -34,9 +34,11 @@
#include "gem/i915_gem_context.h"
#include "i915_drv.h"
-#include "i915_gem_render_state.h"
#include "i915_trace.h"
#include "intel_context.h"
+#include "intel_gt.h"
+#include "intel_gt_irq.h"
+#include "intel_gt_pm_irq.h"
#include "intel_reset.h"
#include "intel_workarounds.h"
@@ -75,7 +77,8 @@ gen2_render_ring_flush(struct i915_request *rq, u32 mode)
*cs++ = cmd;
while (num_store_dw--) {
*cs++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
- *cs++ = i915_scratch_offset(rq->i915);
+ *cs++ = intel_gt_scratch_offset(rq->engine->gt,
+ INTEL_GT_SCRATCH_FIELD_DEFAULT);
*cs++ = 0;
}
*cs++ = MI_FLUSH | MI_NO_WRITE_FLUSH;
@@ -148,7 +151,9 @@ gen4_render_ring_flush(struct i915_request *rq, u32 mode)
*/
if (mode & EMIT_INVALIDATE) {
*cs++ = GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE;
- *cs++ = i915_scratch_offset(rq->i915) | PIPE_CONTROL_GLOBAL_GTT;
+ *cs++ = intel_gt_scratch_offset(rq->engine->gt,
+ INTEL_GT_SCRATCH_FIELD_DEFAULT) |
+ PIPE_CONTROL_GLOBAL_GTT;
*cs++ = 0;
*cs++ = 0;
@@ -156,7 +161,9 @@ gen4_render_ring_flush(struct i915_request *rq, u32 mode)
*cs++ = MI_FLUSH;
*cs++ = GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE;
- *cs++ = i915_scratch_offset(rq->i915) | PIPE_CONTROL_GLOBAL_GTT;
+ *cs++ = intel_gt_scratch_offset(rq->engine->gt,
+ INTEL_GT_SCRATCH_FIELD_DEFAULT) |
+ PIPE_CONTROL_GLOBAL_GTT;
*cs++ = 0;
*cs++ = 0;
}
@@ -208,7 +215,9 @@ gen4_render_ring_flush(struct i915_request *rq, u32 mode)
static int
gen6_emit_post_sync_nonzero_flush(struct i915_request *rq)
{
- u32 scratch_addr = i915_scratch_offset(rq->i915) + 2 * CACHELINE_BYTES;
+ u32 scratch_addr =
+ intel_gt_scratch_offset(rq->engine->gt,
+ INTEL_GT_SCRATCH_FIELD_RENDER_FLUSH);
u32 *cs;
cs = intel_ring_begin(rq, 6);
@@ -241,7 +250,9 @@ gen6_emit_post_sync_nonzero_flush(struct i915_request *rq)
static int
gen6_render_ring_flush(struct i915_request *rq, u32 mode)
{
- u32 scratch_addr = i915_scratch_offset(rq->i915) + 2 * CACHELINE_BYTES;
+ u32 scratch_addr =
+ intel_gt_scratch_offset(rq->engine->gt,
+ INTEL_GT_SCRATCH_FIELD_RENDER_FLUSH);
u32 *cs, flags = 0;
int ret;
@@ -299,7 +310,9 @@ static u32 *gen6_rcs_emit_breadcrumb(struct i915_request *rq, u32 *cs)
*cs++ = GFX_OP_PIPE_CONTROL(4);
*cs++ = PIPE_CONTROL_QW_WRITE;
- *cs++ = i915_scratch_offset(rq->i915) | PIPE_CONTROL_GLOBAL_GTT;
+ *cs++ = intel_gt_scratch_offset(rq->engine->gt,
+ INTEL_GT_SCRATCH_FIELD_DEFAULT) |
+ PIPE_CONTROL_GLOBAL_GTT;
*cs++ = 0;
/* Finally we can flush and with it emit the breadcrumb */
@@ -342,7 +355,9 @@ gen7_render_ring_cs_stall_wa(struct i915_request *rq)
static int
gen7_render_ring_flush(struct i915_request *rq, u32 mode)
{
- u32 scratch_addr = i915_scratch_offset(rq->i915) + 2 * CACHELINE_BYTES;
+ u32 scratch_addr =
+ intel_gt_scratch_offset(rq->engine->gt,
+ INTEL_GT_SCRATCH_FIELD_RENDER_FLUSH);
u32 *cs, flags = 0;
/*
@@ -623,7 +638,7 @@ static bool stop_ring(struct intel_engine_cs *engine)
static int xcs_resume(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
- struct intel_ring *ring = engine->buffer;
+ struct intel_ring *ring = engine->legacy.ring;
int ret = 0;
GEM_TRACE("%s: ring:{HEAD:%04x, TAIL:%04x}\n",
@@ -631,6 +646,7 @@ static int xcs_resume(struct intel_engine_cs *engine)
intel_uncore_forcewake_get(engine->uncore, FORCEWAKE_ALL);
+ /* WaClearRingBufHeadRegAtInit:ctg,elk */
if (!stop_ring(engine)) {
/* G45 ring initialization often fails to reset head to zero */
DRM_DEBUG_DRIVER("%s head not reset to zero "
@@ -662,19 +678,16 @@ static int xcs_resume(struct intel_engine_cs *engine)
intel_engine_reset_breadcrumbs(engine);
/* Enforce ordering by reading HEAD register back */
- ENGINE_READ(engine, RING_HEAD);
+ ENGINE_POSTING_READ(engine, RING_HEAD);
- /* Initialize the ring. This must happen _after_ we've cleared the ring
+ /*
+ * Initialize the ring. This must happen _after_ we've cleared the ring
* registers with the above sequence (the readback of the HEAD registers
* also enforces ordering), otherwise the hw might lose the new ring
- * register values. */
+ * register values.
+ */
ENGINE_WRITE(engine, RING_START, i915_ggtt_offset(ring->vma));
- /* WaClearRingBufHeadRegAtInit:ctg,elk */
- if (ENGINE_READ(engine, RING_HEAD))
- DRM_DEBUG_DRIVER("%s initialization failed [head=%08x], fudging\n",
- engine->name, ENGINE_READ(engine, RING_HEAD));
-
/* Check that the ring offsets point within the ring! */
GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->head));
GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->tail));
@@ -725,7 +738,45 @@ out:
static void reset_prepare(struct intel_engine_cs *engine)
{
- intel_engine_stop_cs(engine);
+ struct intel_uncore *uncore = engine->uncore;
+ const u32 base = engine->mmio_base;
+
+ /*
+ * We stop engines, otherwise we might get failed reset and a
+ * dead gpu (on elk). Even a modern gpu such as kbl can suffer
+ * a system hang if a batchbuffer is still progressing when
+ * the reset is issued, regardless of the READY_TO_RESET ack.
+ * Thus we assume it is best to stop engines on all gens
+ * where we have a gpu reset.
+ *
+ * WaKBLVECSSemaphoreWaitPoll:kbl (on ALL_ENGINES)
+ *
+ * WaMediaResetMainRingCleanup:ctg,elk (presumably)
+ *
+ * FIXME: Wa for more modern gens needs to be validated
+ */
+ GEM_TRACE("%s\n", engine->name);
+
+ if (intel_engine_stop_cs(engine))
+ GEM_TRACE("%s: timed out on STOP_RING\n", engine->name);
+
+ intel_uncore_write_fw(uncore,
+ RING_HEAD(base),
+ intel_uncore_read_fw(uncore, RING_TAIL(base)));
+ intel_uncore_posting_read_fw(uncore, RING_HEAD(base)); /* paranoia */
+
+ intel_uncore_write_fw(uncore, RING_HEAD(base), 0);
+ intel_uncore_write_fw(uncore, RING_TAIL(base), 0);
+ intel_uncore_posting_read_fw(uncore, RING_TAIL(base));
+
+ /* The ring must be empty before it is disabled */
+ intel_uncore_write_fw(uncore, RING_CTL(base), 0);
+
+ /* Check acts as a post */
+ if (intel_uncore_read_fw(uncore, RING_HEAD(base)))
+ GEM_TRACE("%s: ring head [%x] not parked\n",
+ engine->name,
+ intel_uncore_read_fw(uncore, RING_HEAD(base)));
}
static void reset_ring(struct intel_engine_cs *engine, bool stalled)
@@ -781,14 +832,14 @@ static void reset_ring(struct intel_engine_cs *engine, bool stalled)
* If the request was innocent, we try to replay the request
* with the restored context.
*/
- i915_reset_request(rq, stalled);
+ __i915_request_reset(rq, stalled);
- GEM_BUG_ON(rq->ring != engine->buffer);
+ GEM_BUG_ON(rq->ring != engine->legacy.ring);
head = rq->head;
} else {
- head = engine->buffer->tail;
+ head = engine->legacy.ring->tail;
}
- engine->buffer->head = intel_ring_wrap(engine->buffer, head);
+ engine->legacy.ring->head = intel_ring_wrap(engine->legacy.ring, head);
spin_unlock_irqrestore(&engine->active.lock, flags);
}
@@ -797,21 +848,6 @@ static void reset_finish(struct intel_engine_cs *engine)
{
}
-static int intel_rcs_ctx_init(struct i915_request *rq)
-{
- int ret;
-
- ret = intel_engine_emit_ctx_wa(rq);
- if (ret != 0)
- return ret;
-
- ret = i915_gem_render_state_emit(rq);
- if (ret)
- return ret;
-
- return 0;
-}
-
static int rcs_resume(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
@@ -948,13 +984,13 @@ static u32 *gen5_emit_breadcrumb(struct i915_request *rq, u32 *cs)
static void
gen5_irq_enable(struct intel_engine_cs *engine)
{
- gen5_enable_gt_irq(engine->i915, engine->irq_enable_mask);
+ gen5_gt_enable_irq(engine->gt, engine->irq_enable_mask);
}
static void
gen5_irq_disable(struct intel_engine_cs *engine)
{
- gen5_disable_gt_irq(engine->i915, engine->irq_enable_mask);
+ gen5_gt_disable_irq(engine->gt, engine->irq_enable_mask);
}
static void
@@ -1015,14 +1051,14 @@ gen6_irq_enable(struct intel_engine_cs *engine)
/* Flush/delay to ensure the RING_IMR is active before the GT IMR */
ENGINE_POSTING_READ(engine, RING_IMR);
- gen5_enable_gt_irq(engine->i915, engine->irq_enable_mask);
+ gen5_gt_enable_irq(engine->gt, engine->irq_enable_mask);
}
static void
gen6_irq_disable(struct intel_engine_cs *engine)
{
ENGINE_WRITE(engine, RING_IMR, ~engine->irq_keep_mask);
- gen5_disable_gt_irq(engine->i915, engine->irq_enable_mask);
+ gen5_gt_disable_irq(engine->gt, engine->irq_enable_mask);
}
static void
@@ -1033,14 +1069,14 @@ hsw_vebox_irq_enable(struct intel_engine_cs *engine)
/* Flush/delay to ensure the RING_IMR is active before the GT IMR */
ENGINE_POSTING_READ(engine, RING_IMR);
- gen6_unmask_pm_irq(engine->i915, engine->irq_enable_mask);
+ gen6_gt_pm_unmask_irq(engine->gt, engine->irq_enable_mask);
}
static void
hsw_vebox_irq_disable(struct intel_engine_cs *engine)
{
ENGINE_WRITE(engine, RING_IMR, ~0);
- gen6_mask_pm_irq(engine->i915, engine->irq_enable_mask);
+ gen6_gt_pm_mask_irq(engine->gt, engine->irq_enable_mask);
}
static int
@@ -1071,9 +1107,11 @@ i830_emit_bb_start(struct i915_request *rq,
u64 offset, u32 len,
unsigned int dispatch_flags)
{
- u32 *cs, cs_offset = i915_scratch_offset(rq->i915);
+ u32 *cs, cs_offset =
+ intel_gt_scratch_offset(rq->engine->gt,
+ INTEL_GT_SCRATCH_FIELD_DEFAULT);
- GEM_BUG_ON(rq->i915->gt.scratch->size < I830_WA_SIZE);
+ GEM_BUG_ON(rq->engine->gt->scratch->size < I830_WA_SIZE);
cs = intel_ring_begin(rq, 6);
if (IS_ERR(cs))
@@ -1100,7 +1138,7 @@ i830_emit_bb_start(struct i915_request *rq,
* stable batch scratch bo area (so that the CS never
* stumbles over its tlb invalidation bug) ...
*/
- *cs++ = SRC_COPY_BLT_CMD | BLT_WRITE_RGBA;
+ *cs++ = SRC_COPY_BLT_CMD | BLT_WRITE_RGBA | (6 - 2);
*cs++ = BLT_DEPTH_32 | BLT_ROP_SRC_COPY | 4096;
*cs++ = DIV_ROUND_UP(len, 4096) << 16 | 4096;
*cs++ = cs_offset;
@@ -1156,10 +1194,6 @@ int intel_ring_pin(struct intel_ring *ring)
if (atomic_fetch_inc(&ring->pin_count))
return 0;
- ret = i915_timeline_pin(ring->timeline);
- if (ret)
- goto err_unpin;
-
flags = PIN_GLOBAL;
/* Ring wraparound at offset 0 sometimes hangs. No idea why. */
@@ -1172,7 +1206,7 @@ int intel_ring_pin(struct intel_ring *ring)
ret = i915_vma_pin(vma, 0, 0, flags);
if (unlikely(ret))
- goto err_timeline;
+ goto err_unpin;
if (i915_vma_is_map_and_fenceable(vma))
addr = (void __force *)i915_vma_pin_iomap(vma);
@@ -1184,7 +1218,7 @@ int intel_ring_pin(struct intel_ring *ring)
goto err_ring;
}
- vma->obj->pin_global++;
+ i915_vma_make_unshrinkable(vma);
GEM_BUG_ON(ring->vaddr);
ring->vaddr = addr;
@@ -1193,8 +1227,6 @@ int intel_ring_pin(struct intel_ring *ring)
err_ring:
i915_vma_unpin(vma);
-err_timeline:
- i915_timeline_unpin(ring->timeline);
err_unpin:
atomic_dec(&ring->pin_count);
return ret;
@@ -1202,8 +1234,7 @@ err_unpin:
void intel_ring_reset(struct intel_ring *ring, u32 tail)
{
- GEM_BUG_ON(!intel_ring_offset_valid(ring, tail));
-
+ tail = intel_ring_wrap(ring, tail);
ring->tail = tail;
ring->head = tail;
ring->emit = tail;
@@ -1212,37 +1243,37 @@ void intel_ring_reset(struct intel_ring *ring, u32 tail)
void intel_ring_unpin(struct intel_ring *ring)
{
+ struct i915_vma *vma = ring->vma;
+
if (!atomic_dec_and_test(&ring->pin_count))
return;
/* Discard any unused bytes beyond that submitted to hw. */
- intel_ring_reset(ring, ring->tail);
+ intel_ring_reset(ring, ring->emit);
- GEM_BUG_ON(!ring->vma);
- if (i915_vma_is_map_and_fenceable(ring->vma))
- i915_vma_unpin_iomap(ring->vma);
+ i915_vma_unset_ggtt_write(vma);
+ if (i915_vma_is_map_and_fenceable(vma))
+ i915_vma_unpin_iomap(vma);
else
- i915_gem_object_unpin_map(ring->vma->obj);
+ i915_gem_object_unpin_map(vma->obj);
GEM_BUG_ON(!ring->vaddr);
ring->vaddr = NULL;
- ring->vma->obj->pin_global--;
- i915_vma_unpin(ring->vma);
-
- i915_timeline_unpin(ring->timeline);
+ i915_vma_unpin(vma);
+ i915_vma_make_purgeable(vma);
}
-static struct i915_vma *
-intel_ring_create_vma(struct drm_i915_private *dev_priv, int size)
+static struct i915_vma *create_ring_vma(struct i915_ggtt *ggtt, int size)
{
- struct i915_address_space *vm = &dev_priv->ggtt.vm;
+ struct i915_address_space *vm = &ggtt->vm;
+ struct drm_i915_private *i915 = vm->i915;
struct drm_i915_gem_object *obj;
struct i915_vma *vma;
- obj = i915_gem_object_create_stolen(dev_priv, size);
+ obj = i915_gem_object_create_stolen(i915, size);
if (!obj)
- obj = i915_gem_object_create_internal(dev_priv, size);
+ obj = i915_gem_object_create_internal(i915, size);
if (IS_ERR(obj))
return ERR_CAST(obj);
@@ -1265,10 +1296,9 @@ err:
}
struct intel_ring *
-intel_engine_create_ring(struct intel_engine_cs *engine,
- struct i915_timeline *timeline,
- int size)
+intel_engine_create_ring(struct intel_engine_cs *engine, int size)
{
+ struct drm_i915_private *i915 = engine->i915;
struct intel_ring *ring;
struct i915_vma *vma;
@@ -1280,8 +1310,6 @@ intel_engine_create_ring(struct intel_engine_cs *engine,
return ERR_PTR(-ENOMEM);
kref_init(&ring->ref);
- INIT_LIST_HEAD(&ring->request_list);
- ring->timeline = i915_timeline_get(timeline);
ring->size = size;
/* Workaround an erratum on the i830 which causes a hang if
@@ -1289,12 +1317,12 @@ intel_engine_create_ring(struct intel_engine_cs *engine,
* of the buffer.
*/
ring->effective_size = size;
- if (IS_I830(engine->i915) || IS_I845G(engine->i915))
+ if (IS_I830(i915) || IS_I845G(i915))
ring->effective_size -= 2 * CACHELINE_BYTES;
intel_ring_update_space(ring);
- vma = intel_ring_create_vma(engine->i915, size);
+ vma = create_ring_vma(engine->gt->ggtt, size);
if (IS_ERR(vma)) {
kfree(ring);
return ERR_CAST(vma);
@@ -1311,13 +1339,11 @@ void intel_ring_free(struct kref *ref)
i915_vma_close(ring->vma);
i915_vma_put(ring->vma);
- i915_timeline_put(ring->timeline);
kfree(ring);
}
static void __ring_context_fini(struct intel_context *ce)
{
- GEM_BUG_ON(i915_gem_object_is_active(ce->state->obj));
i915_gem_object_put(ce->state->obj);
}
@@ -1330,33 +1356,45 @@ static void ring_context_destroy(struct kref *ref)
if (ce->state)
__ring_context_fini(ce);
+ intel_context_fini(ce);
intel_context_free(ce);
}
-static int __context_pin_ppgtt(struct i915_gem_context *ctx)
+static struct i915_address_space *vm_alias(struct intel_context *ce)
+{
+ struct i915_address_space *vm;
+
+ vm = ce->vm;
+ if (i915_is_ggtt(vm))
+ vm = &i915_vm_to_ggtt(vm)->alias->vm;
+
+ return vm;
+}
+
+static int __context_pin_ppgtt(struct intel_context *ce)
{
struct i915_address_space *vm;
int err = 0;
- vm = ctx->vm ?: &ctx->i915->mm.aliasing_ppgtt->vm;
+ vm = vm_alias(ce);
if (vm)
err = gen6_ppgtt_pin(i915_vm_to_ppgtt((vm)));
return err;
}
-static void __context_unpin_ppgtt(struct i915_gem_context *ctx)
+static void __context_unpin_ppgtt(struct intel_context *ce)
{
struct i915_address_space *vm;
- vm = ctx->vm ?: &ctx->i915->mm.aliasing_ppgtt->vm;
+ vm = vm_alias(ce);
if (vm)
gen6_ppgtt_unpin(i915_vm_to_ppgtt(vm));
}
static void ring_context_unpin(struct intel_context *ce)
{
- __context_unpin_ppgtt(ce->gem_context);
+ __context_unpin_ppgtt(ce);
}
static struct i915_vma *
@@ -1412,7 +1450,7 @@ alloc_context_vma(struct intel_engine_cs *engine)
i915_gem_object_unpin_map(obj);
}
- vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
+ vma = i915_vma_instance(obj, &engine->gt->ggtt->vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
goto err_obj;
@@ -1427,16 +1465,17 @@ err_obj:
return ERR_PTR(err);
}
-static int ring_context_pin(struct intel_context *ce)
+static int ring_context_alloc(struct intel_context *ce)
{
struct intel_engine_cs *engine = ce->engine;
- int err;
/* One ringbuffer to rule them all */
- GEM_BUG_ON(!engine->buffer);
- ce->ring = engine->buffer;
+ GEM_BUG_ON(!engine->legacy.ring);
+ ce->ring = engine->legacy.ring;
+ ce->timeline = intel_timeline_get(engine->legacy.timeline);
- if (!ce->state && engine->context_size) {
+ GEM_BUG_ON(ce->state);
+ if (engine->context_size) {
struct i915_vma *vma;
vma = alloc_context_vma(engine);
@@ -1446,11 +1485,18 @@ static int ring_context_pin(struct intel_context *ce)
ce->state = vma;
}
- err = intel_context_active_acquire(ce, PIN_HIGH);
+ return 0;
+}
+
+static int ring_context_pin(struct intel_context *ce)
+{
+ int err;
+
+ err = intel_context_active_acquire(ce);
if (err)
return err;
- err = __context_pin_ppgtt(ce->gem_context);
+ err = __context_pin_ppgtt(ce);
if (err)
goto err_active;
@@ -1467,6 +1513,8 @@ static void ring_context_reset(struct intel_context *ce)
}
static const struct intel_context_ops ring_context_ops = {
+ .alloc = ring_context_alloc,
+
.pin = ring_context_pin,
.unpin = ring_context_unpin,
@@ -1492,7 +1540,7 @@ static int load_pd_dir(struct i915_request *rq, const struct i915_ppgtt *ppgtt)
*cs++ = MI_LOAD_REGISTER_IMM(1);
*cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine->mmio_base));
- *cs++ = ppgtt->pd->base.ggtt_offset << 10;
+ *cs++ = px_base(ppgtt->pd)->ggtt_offset << 10;
intel_ring_advance(rq, cs);
@@ -1511,7 +1559,8 @@ static int flush_pd_dir(struct i915_request *rq)
/* Stall until the page table load is complete */
*cs++ = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
*cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine->mmio_base));
- *cs++ = i915_scratch_offset(rq->i915);
+ *cs++ = intel_gt_scratch_offset(rq->engine->gt,
+ INTEL_GT_SCRATCH_FIELD_DEFAULT);
*cs++ = MI_NOOP;
intel_ring_advance(rq, cs);
@@ -1627,7 +1676,8 @@ static inline int mi_set_context(struct i915_request *rq, u32 flags)
/* Insert a delay before the next switch! */
*cs++ = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
*cs++ = i915_mmio_reg_offset(last_reg);
- *cs++ = i915_scratch_offset(rq->i915);
+ *cs++ = intel_gt_scratch_offset(rq->engine->gt,
+ INTEL_GT_SCRATCH_FIELD_DEFAULT);
*cs++ = MI_NOOP;
}
*cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
@@ -1640,7 +1690,7 @@ static inline int mi_set_context(struct i915_request *rq, u32 flags)
return 0;
}
-static int remap_l3(struct i915_request *rq, int slice)
+static int remap_l3_slice(struct i915_request *rq, int slice)
{
u32 *cs, *remap_info = rq->i915->l3_parity.remap_info[slice];
int i;
@@ -1668,15 +1718,34 @@ static int remap_l3(struct i915_request *rq, int slice)
return 0;
}
+static int remap_l3(struct i915_request *rq)
+{
+ struct i915_gem_context *ctx = rq->gem_context;
+ int i, err;
+
+ if (!ctx->remap_slice)
+ return 0;
+
+ for (i = 0; i < MAX_L3_SLICES; i++) {
+ if (!(ctx->remap_slice & BIT(i)))
+ continue;
+
+ err = remap_l3_slice(rq, i);
+ if (err)
+ return err;
+ }
+
+ ctx->remap_slice = 0;
+ return 0;
+}
+
static int switch_context(struct i915_request *rq)
{
struct intel_engine_cs *engine = rq->engine;
- struct i915_gem_context *ctx = rq->gem_context;
- struct i915_address_space *vm =
- ctx->vm ?: &rq->i915->mm.aliasing_ppgtt->vm;
+ struct i915_address_space *vm = vm_alias(rq->hw_context);
unsigned int unwind_mm = 0;
u32 hw_flags = 0;
- int ret, i;
+ int ret;
GEM_BUG_ON(HAS_EXECLISTS(rq->i915));
@@ -1720,7 +1789,7 @@ static int switch_context(struct i915_request *rq)
* as nothing actually executes using the kernel context; it
* is purely used for flushing user contexts.
*/
- if (i915_gem_context_is_kernel(ctx))
+ if (i915_gem_context_is_kernel(rq->gem_context))
hw_flags = MI_RESTORE_INHIBIT;
ret = mi_set_context(rq, hw_flags);
@@ -1754,18 +1823,9 @@ static int switch_context(struct i915_request *rq)
goto err_mm;
}
- if (ctx->remap_slice) {
- for (i = 0; i < MAX_L3_SLICES; i++) {
- if (!(ctx->remap_slice & BIT(i)))
- continue;
-
- ret = remap_l3(rq, i);
- if (ret)
- goto err_mm;
- }
-
- ctx->remap_slice = 0;
- }
+ ret = remap_l3(rq);
+ if (ret)
+ goto err_mm;
return 0;
@@ -1803,7 +1863,10 @@ static int ring_request_alloc(struct i915_request *request)
return 0;
}
-static noinline int wait_for_space(struct intel_ring *ring, unsigned int bytes)
+static noinline int
+wait_for_space(struct intel_ring *ring,
+ struct intel_timeline *tl,
+ unsigned int bytes)
{
struct i915_request *target;
long timeout;
@@ -1811,15 +1874,18 @@ static noinline int wait_for_space(struct intel_ring *ring, unsigned int bytes)
if (intel_ring_update_space(ring) >= bytes)
return 0;
- GEM_BUG_ON(list_empty(&ring->request_list));
- list_for_each_entry(target, &ring->request_list, ring_link) {
+ GEM_BUG_ON(list_empty(&tl->requests));
+ list_for_each_entry(target, &tl->requests, link) {
+ if (target->ring != ring)
+ continue;
+
/* Would completion of this request free enough space? */
if (bytes <= __intel_ring_space(target->postfix,
ring->emit, ring->size))
break;
}
- if (WARN_ON(&target->ring_link == &ring->request_list))
+ if (GEM_WARN_ON(&target->link == &tl->requests))
return -ENOSPC;
timeout = i915_request_wait(target,
@@ -1886,7 +1952,7 @@ u32 *intel_ring_begin(struct i915_request *rq, unsigned int num_dwords)
*/
GEM_BUG_ON(!rq->reserved_space);
- ret = wait_for_space(ring, total_bytes);
+ ret = wait_for_space(ring, rq->timeline, total_bytes);
if (unlikely(ret))
return ERR_PTR(ret);
}
@@ -2091,8 +2157,11 @@ static void ring_destroy(struct intel_engine_cs *engine)
intel_engine_cleanup_common(engine);
- intel_ring_unpin(engine->buffer);
- intel_ring_put(engine->buffer);
+ intel_ring_unpin(engine->legacy.ring);
+ intel_ring_put(engine->legacy.ring);
+
+ intel_timeline_unpin(engine->legacy.timeline);
+ intel_timeline_put(engine->legacy.timeline);
kfree(engine);
}
@@ -2166,11 +2235,9 @@ static void setup_rcs(struct intel_engine_cs *engine)
engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT;
if (INTEL_GEN(i915) >= 7) {
- engine->init_context = intel_rcs_ctx_init;
engine->emit_flush = gen7_render_ring_flush;
engine->emit_fini_breadcrumb = gen7_rcs_emit_breadcrumb;
} else if (IS_GEN(i915, 6)) {
- engine->init_context = intel_rcs_ctx_init;
engine->emit_flush = gen6_render_ring_flush;
engine->emit_fini_breadcrumb = gen6_rcs_emit_breadcrumb;
} else if (IS_GEN(i915, 5)) {
@@ -2267,43 +2334,51 @@ int intel_ring_submission_setup(struct intel_engine_cs *engine)
int intel_ring_submission_init(struct intel_engine_cs *engine)
{
- struct i915_timeline *timeline;
+ struct intel_timeline *timeline;
struct intel_ring *ring;
int err;
- timeline = i915_timeline_create(engine->i915, engine->status_page.vma);
+ timeline = intel_timeline_create(engine->gt, engine->status_page.vma);
if (IS_ERR(timeline)) {
err = PTR_ERR(timeline);
goto err;
}
GEM_BUG_ON(timeline->has_initial_breadcrumb);
- ring = intel_engine_create_ring(engine, timeline, 32 * PAGE_SIZE);
- i915_timeline_put(timeline);
+ err = intel_timeline_pin(timeline);
+ if (err)
+ goto err_timeline;
+
+ ring = intel_engine_create_ring(engine, SZ_16K);
if (IS_ERR(ring)) {
err = PTR_ERR(ring);
- goto err;
+ goto err_timeline_unpin;
}
err = intel_ring_pin(ring);
if (err)
goto err_ring;
- GEM_BUG_ON(engine->buffer);
- engine->buffer = ring;
+ GEM_BUG_ON(engine->legacy.ring);
+ engine->legacy.ring = ring;
+ engine->legacy.timeline = timeline;
err = intel_engine_init_common(engine);
if (err)
- goto err_unpin;
+ goto err_ring_unpin;
- GEM_BUG_ON(ring->timeline->hwsp_ggtt != engine->status_page.vma);
+ GEM_BUG_ON(timeline->hwsp_ggtt != engine->status_page.vma);
return 0;
-err_unpin:
+err_ring_unpin:
intel_ring_unpin(ring);
err_ring:
intel_ring_put(ring);
+err_timeline_unpin:
+ intel_timeline_unpin(timeline);
+err_timeline:
+ intel_timeline_put(timeline);
err:
intel_engine_cleanup_common(engine);
return err;
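With ring->timeline removed, legacy submission now owns a single timeline per engine, pinned for the engine's lifetime alongside the ring. Condensed from the intel_ring_submission_init() hunk above (a sketch only, error unwinding elided):

/* Sketch: the new ownership order for legacy ring submission. */
timeline = intel_timeline_create(engine->gt, engine->status_page.vma);
intel_timeline_pin(timeline);		/* pinned for the engine lifetime */

ring = intel_engine_create_ring(engine, SZ_16K);
intel_ring_pin(ring);			/* no longer pins a timeline itself */

engine->legacy.ring = ring;
engine->legacy.timeline = timeline;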
diff --git a/drivers/gpu/drm/i915/gt/intel_sseu.c b/drivers/gpu/drm/i915/gt/intel_sseu.c
index a0756f006f5f..6bf2d87da109 100644
--- a/drivers/gpu/drm/i915/gt/intel_sseu.c
+++ b/drivers/gpu/drm/i915/gt/intel_sseu.c
@@ -49,7 +49,7 @@ u32 intel_sseu_make_rpcs(struct drm_i915_private *i915,
* cases which disable slices for functional, apart for performance
* reasons. So in this case we select a known stable subset.
*/
- if (!i915->perf.oa.exclusive_stream) {
+ if (!i915->perf.exclusive_stream) {
ctx_sseu = *req_sseu;
} else {
ctx_sseu = intel_sseu_from_device_info(sseu);
diff --git a/drivers/gpu/drm/i915/i915_timeline.c b/drivers/gpu/drm/i915/gt/intel_timeline.c
index c311ce9c6f9d..9cb01d9828f1 100644
--- a/drivers/gpu/drm/i915/i915_timeline.c
+++ b/drivers/gpu/drm/i915/gt/intel_timeline.c
@@ -4,38 +4,36 @@
* Copyright © 2016-2018 Intel Corporation
*/
+#include "gt/intel_gt_types.h"
+
#include "i915_drv.h"
#include "i915_active.h"
#include "i915_syncmap.h"
-#include "i915_timeline.h"
+#include "gt/intel_timeline.h"
#define ptr_set_bit(ptr, bit) ((typeof(ptr))((unsigned long)(ptr) | BIT(bit)))
#define ptr_test_bit(ptr, bit) ((unsigned long)(ptr) & BIT(bit))
-struct i915_timeline_hwsp {
- struct i915_gt_timelines *gt;
+struct intel_timeline_hwsp {
+ struct intel_gt *gt;
+ struct intel_gt_timelines *gt_timelines;
struct list_head free_link;
struct i915_vma *vma;
u64 free_bitmap;
};
-struct i915_timeline_cacheline {
+struct intel_timeline_cacheline {
struct i915_active active;
- struct i915_timeline_hwsp *hwsp;
+ struct intel_timeline_hwsp *hwsp;
void *vaddr;
#define CACHELINE_BITS 6
#define CACHELINE_FREE CACHELINE_BITS
};
-static inline struct drm_i915_private *
-hwsp_to_i915(struct i915_timeline_hwsp *hwsp)
-{
- return container_of(hwsp->gt, struct drm_i915_private, gt.timelines);
-}
-
-static struct i915_vma *__hwsp_alloc(struct drm_i915_private *i915)
+static struct i915_vma *__hwsp_alloc(struct intel_gt *gt)
{
+ struct drm_i915_private *i915 = gt->i915;
struct drm_i915_gem_object *obj;
struct i915_vma *vma;
@@ -45,7 +43,7 @@ static struct i915_vma *__hwsp_alloc(struct drm_i915_private *i915)
i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC);
- vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
+ vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
if (IS_ERR(vma))
i915_gem_object_put(obj);
@@ -53,11 +51,10 @@ static struct i915_vma *__hwsp_alloc(struct drm_i915_private *i915)
}
static struct i915_vma *
-hwsp_alloc(struct i915_timeline *timeline, unsigned int *cacheline)
+hwsp_alloc(struct intel_timeline *timeline, unsigned int *cacheline)
{
- struct drm_i915_private *i915 = timeline->i915;
- struct i915_gt_timelines *gt = &i915->gt.timelines;
- struct i915_timeline_hwsp *hwsp;
+ struct intel_gt_timelines *gt = &timeline->gt->timelines;
+ struct intel_timeline_hwsp *hwsp;
BUILD_BUG_ON(BITS_PER_TYPE(u64) * CACHELINE_BYTES > PAGE_SIZE);
@@ -75,16 +72,17 @@ hwsp_alloc(struct i915_timeline *timeline, unsigned int *cacheline)
if (!hwsp)
return ERR_PTR(-ENOMEM);
- vma = __hwsp_alloc(i915);
+ vma = __hwsp_alloc(timeline->gt);
if (IS_ERR(vma)) {
kfree(hwsp);
return vma;
}
vma->private = hwsp;
+ hwsp->gt = timeline->gt;
hwsp->vma = vma;
hwsp->free_bitmap = ~0ull;
- hwsp->gt = gt;
+ hwsp->gt_timelines = gt;
spin_lock_irq(&gt->hwsp_lock);
list_add(&hwsp->free_link, &gt->hwsp_free_list);
@@ -102,9 +100,9 @@ hwsp_alloc(struct i915_timeline *timeline, unsigned int *cacheline)
return hwsp->vma;
}
-static void __idle_hwsp_free(struct i915_timeline_hwsp *hwsp, int cacheline)
+static void __idle_hwsp_free(struct intel_timeline_hwsp *hwsp, int cacheline)
{
- struct i915_gt_timelines *gt = hwsp->gt;
+ struct intel_gt_timelines *gt = hwsp->gt_timelines;
unsigned long flags;
spin_lock_irqsave(&gt->hwsp_lock, flags);
@@ -126,7 +124,7 @@ static void __idle_hwsp_free(struct i915_timeline_hwsp *hwsp, int cacheline)
spin_unlock_irqrestore(&gt->hwsp_lock, flags);
}
-static void __idle_cacheline_free(struct i915_timeline_cacheline *cl)
+static void __idle_cacheline_free(struct intel_timeline_cacheline *cl)
{
GEM_BUG_ON(!i915_active_is_idle(&cl->active));
@@ -140,7 +138,7 @@ static void __idle_cacheline_free(struct i915_timeline_cacheline *cl)
static void __cacheline_retire(struct i915_active *active)
{
- struct i915_timeline_cacheline *cl =
+ struct intel_timeline_cacheline *cl =
container_of(active, typeof(*cl), active);
i915_vma_unpin(cl->hwsp->vma);
@@ -148,10 +146,19 @@ static void __cacheline_retire(struct i915_active *active)
__idle_cacheline_free(cl);
}
-static struct i915_timeline_cacheline *
-cacheline_alloc(struct i915_timeline_hwsp *hwsp, unsigned int cacheline)
+static int __cacheline_active(struct i915_active *active)
{
- struct i915_timeline_cacheline *cl;
+ struct intel_timeline_cacheline *cl =
+ container_of(active, typeof(*cl), active);
+
+ __i915_vma_pin(cl->hwsp->vma);
+ return 0;
+}
+
+static struct intel_timeline_cacheline *
+cacheline_alloc(struct intel_timeline_hwsp *hwsp, unsigned int cacheline)
+{
+ struct intel_timeline_cacheline *cl;
void *vaddr;
GEM_BUG_ON(cacheline >= BIT(CACHELINE_BITS));
@@ -170,24 +177,25 @@ cacheline_alloc(struct i915_timeline_hwsp *hwsp, unsigned int cacheline)
cl->hwsp = hwsp;
cl->vaddr = page_pack_bits(vaddr, cacheline);
- i915_active_init(hwsp_to_i915(hwsp), &cl->active, __cacheline_retire);
+ i915_active_init(hwsp->gt->i915, &cl->active,
+ __cacheline_active, __cacheline_retire);
return cl;
}
-static void cacheline_acquire(struct i915_timeline_cacheline *cl)
+static void cacheline_acquire(struct intel_timeline_cacheline *cl)
{
- if (cl && i915_active_acquire(&cl->active))
- __i915_vma_pin(cl->hwsp->vma);
+ if (cl)
+ i915_active_acquire(&cl->active);
}
-static void cacheline_release(struct i915_timeline_cacheline *cl)
+static void cacheline_release(struct intel_timeline_cacheline *cl)
{
if (cl)
i915_active_release(&cl->active);
}
-static void cacheline_free(struct i915_timeline_cacheline *cl)
+static void cacheline_free(struct intel_timeline_cacheline *cl)
{
GEM_BUG_ON(ptr_test_bit(cl->vaddr, CACHELINE_FREE));
cl->vaddr = ptr_set_bit(cl->vaddr, CACHELINE_FREE);
@@ -196,29 +204,22 @@ static void cacheline_free(struct i915_timeline_cacheline *cl)
__idle_cacheline_free(cl);
}
-int i915_timeline_init(struct drm_i915_private *i915,
- struct i915_timeline *timeline,
- struct i915_vma *hwsp)
+int intel_timeline_init(struct intel_timeline *timeline,
+ struct intel_gt *gt,
+ struct i915_vma *hwsp)
{
void *vaddr;
- /*
- * Ideally we want a set of engines on a single leaf as we expect
- * to mostly be tracking synchronisation between engines. It is not
- * a huge issue if this is not the case, but we may want to mitigate
- * any page crossing penalties if they become an issue.
- *
- * Called during early_init before we know how many engines there are.
- */
- BUILD_BUG_ON(KSYNCMAP < I915_NUM_ENGINES);
+ kref_init(&timeline->kref);
+ atomic_set(&timeline->pin_count, 0);
+
+ timeline->gt = gt;
- timeline->i915 = i915;
- timeline->pin_count = 0;
timeline->has_initial_breadcrumb = !hwsp;
timeline->hwsp_cacheline = NULL;
if (!hwsp) {
- struct i915_timeline_cacheline *cl;
+ struct intel_timeline_cacheline *cl;
unsigned int cacheline;
hwsp = hwsp_alloc(timeline, &cacheline);
@@ -253,7 +254,7 @@ int i915_timeline_init(struct drm_i915_private *i915,
mutex_init(&timeline->mutex);
- INIT_ACTIVE_REQUEST(&timeline->last_request);
+ INIT_ACTIVE_REQUEST(&timeline->last_request, &timeline->mutex);
INIT_LIST_HEAD(&timeline->requests);
i915_syncmap_init(&timeline->sync);
@@ -261,73 +262,27 @@ int i915_timeline_init(struct drm_i915_private *i915,
return 0;
}
-void i915_timelines_init(struct drm_i915_private *i915)
+static void timelines_init(struct intel_gt *gt)
{
- struct i915_gt_timelines *gt = &i915->gt.timelines;
-
- mutex_init(&gt->mutex);
- INIT_LIST_HEAD(&gt->active_list);
+ struct intel_gt_timelines *timelines = &gt->timelines;
- spin_lock_init(&gt->hwsp_lock);
- INIT_LIST_HEAD(&gt->hwsp_free_list);
+ spin_lock_init(&timelines->lock);
+ INIT_LIST_HEAD(&timelines->active_list);
- /* via i915_gem_wait_for_idle() */
- i915_gem_shrinker_taints_mutex(i915, &gt->mutex);
+ spin_lock_init(&timelines->hwsp_lock);
+ INIT_LIST_HEAD(&timelines->hwsp_free_list);
}
-static void timeline_add_to_active(struct i915_timeline *tl)
+void intel_timelines_init(struct drm_i915_private *i915)
{
- struct i915_gt_timelines *gt = &tl->i915->gt.timelines;
-
- mutex_lock(&gt->mutex);
- list_add(&tl->link, &gt->active_list);
- mutex_unlock(&gt->mutex);
-}
-
-static void timeline_remove_from_active(struct i915_timeline *tl)
-{
- struct i915_gt_timelines *gt = &tl->i915->gt.timelines;
-
- mutex_lock(&gt->mutex);
- list_del(&tl->link);
- mutex_unlock(&gt->mutex);
-}
-
-/**
- * i915_timelines_park - called when the driver idles
- * @i915: the drm_i915_private device
- *
- * When the driver is completely idle, we know that all of our sync points
- * have been signaled and our tracking is then entirely redundant. Any request
- * to wait upon an older sync point will be completed instantly as we know
- * the fence is signaled and therefore we will not even look them up in the
- * sync point map.
- */
-void i915_timelines_park(struct drm_i915_private *i915)
-{
- struct i915_gt_timelines *gt = &i915->gt.timelines;
- struct i915_timeline *timeline;
-
- mutex_lock(&gt->mutex);
- list_for_each_entry(timeline, &gt->active_list, link) {
- /*
- * All known fences are completed so we can scrap
- * the current sync point tracking and start afresh,
- * any attempt to wait upon a previous sync point
- * will be skipped as the fence was signaled.
- */
- i915_syncmap_free(&timeline->sync);
- }
- mutex_unlock(&gt->mutex);
+ timelines_init(&i915->gt);
}
-void i915_timeline_fini(struct i915_timeline *timeline)
+void intel_timeline_fini(struct intel_timeline *timeline)
{
- GEM_BUG_ON(timeline->pin_count);
+ GEM_BUG_ON(atomic_read(&timeline->pin_count));
GEM_BUG_ON(!list_empty(&timeline->requests));
- i915_syncmap_free(&timeline->sync);
-
if (timeline->hwsp_cacheline)
cacheline_free(timeline->hwsp_cacheline);
else
@@ -336,73 +291,108 @@ void i915_timeline_fini(struct i915_timeline *timeline)
i915_vma_put(timeline->hwsp_ggtt);
}
-struct i915_timeline *
-i915_timeline_create(struct drm_i915_private *i915,
- struct i915_vma *global_hwsp)
+struct intel_timeline *
+intel_timeline_create(struct intel_gt *gt, struct i915_vma *global_hwsp)
{
- struct i915_timeline *timeline;
+ struct intel_timeline *timeline;
int err;
timeline = kzalloc(sizeof(*timeline), GFP_KERNEL);
if (!timeline)
return ERR_PTR(-ENOMEM);
- err = i915_timeline_init(i915, timeline, global_hwsp);
+ err = intel_timeline_init(timeline, gt, global_hwsp);
if (err) {
kfree(timeline);
return ERR_PTR(err);
}
- kref_init(&timeline->kref);
-
return timeline;
}
-int i915_timeline_pin(struct i915_timeline *tl)
+int intel_timeline_pin(struct intel_timeline *tl)
{
int err;
- if (tl->pin_count++)
+ if (atomic_add_unless(&tl->pin_count, 1, 0))
return 0;
- GEM_BUG_ON(!tl->pin_count);
err = i915_vma_pin(tl->hwsp_ggtt, 0, 0, PIN_GLOBAL | PIN_HIGH);
if (err)
- goto unpin;
+ return err;
tl->hwsp_offset =
i915_ggtt_offset(tl->hwsp_ggtt) +
offset_in_page(tl->hwsp_offset);
cacheline_acquire(tl->hwsp_cacheline);
- timeline_add_to_active(tl);
+ if (atomic_fetch_inc(&tl->pin_count)) {
+ cacheline_release(tl->hwsp_cacheline);
+ __i915_vma_unpin(tl->hwsp_ggtt);
+ }
return 0;
+}
-unpin:
- tl->pin_count = 0;
- return err;
+void intel_timeline_enter(struct intel_timeline *tl)
+{
+ struct intel_gt_timelines *timelines = &tl->gt->timelines;
+ unsigned long flags;
+
+ lockdep_assert_held(&tl->mutex);
+
+ GEM_BUG_ON(!atomic_read(&tl->pin_count));
+ if (tl->active_count++)
+ return;
+ GEM_BUG_ON(!tl->active_count); /* overflow? */
+
+ spin_lock_irqsave(&timelines->lock, flags);
+ list_add(&tl->link, &timelines->active_list);
+ spin_unlock_irqrestore(&timelines->lock, flags);
}
-static u32 timeline_advance(struct i915_timeline *tl)
+void intel_timeline_exit(struct intel_timeline *tl)
{
- GEM_BUG_ON(!tl->pin_count);
+ struct intel_gt_timelines *timelines = &tl->gt->timelines;
+ unsigned long flags;
+
+ lockdep_assert_held(&tl->mutex);
+
+ GEM_BUG_ON(!tl->active_count);
+ if (--tl->active_count)
+ return;
+
+ spin_lock_irqsave(&timelines->lock, flags);
+ list_del(&tl->link);
+ spin_unlock_irqrestore(&timelines->lock, flags);
+
+ /*
+ * Since this timeline is idle, all barriers upon which we were waiting
+ * must also be complete and so we can discard the last used barriers
+ * without loss of information.
+ */
+ i915_syncmap_free(&tl->sync);
+}
+
+static u32 timeline_advance(struct intel_timeline *tl)
+{
+ GEM_BUG_ON(!atomic_read(&tl->pin_count));
GEM_BUG_ON(tl->seqno & tl->has_initial_breadcrumb);
return tl->seqno += 1 + tl->has_initial_breadcrumb;
}
-static void timeline_rollback(struct i915_timeline *tl)
+static void timeline_rollback(struct intel_timeline *tl)
{
tl->seqno -= 1 + tl->has_initial_breadcrumb;
}
static noinline int
-__i915_timeline_get_seqno(struct i915_timeline *tl,
- struct i915_request *rq,
- u32 *seqno)
+__intel_timeline_get_seqno(struct intel_timeline *tl,
+ struct i915_request *rq,
+ u32 *seqno)
{
- struct i915_timeline_cacheline *cl;
+ struct intel_timeline_cacheline *cl;
unsigned int cacheline;
struct i915_vma *vma;
void *vaddr;
@@ -452,8 +442,7 @@ __i915_timeline_get_seqno(struct i915_timeline *tl,
* free it after the current request is retired, which ensures that
* all writes into the cacheline from previous requests are complete.
*/
- err = i915_active_ref(&tl->hwsp_cacheline->active,
- tl->fence_context, rq);
+ err = i915_active_ref(&tl->hwsp_cacheline->active, tl, rq);
if (err)
goto err_cacheline;
@@ -488,31 +477,31 @@ err_rollback:
return err;
}
-int i915_timeline_get_seqno(struct i915_timeline *tl,
- struct i915_request *rq,
- u32 *seqno)
+int intel_timeline_get_seqno(struct intel_timeline *tl,
+ struct i915_request *rq,
+ u32 *seqno)
{
*seqno = timeline_advance(tl);
/* Replace the HWSP on wraparound for HW semaphores */
if (unlikely(!*seqno && tl->hwsp_cacheline))
- return __i915_timeline_get_seqno(tl, rq, seqno);
+ return __intel_timeline_get_seqno(tl, rq, seqno);
return 0;
}
-static int cacheline_ref(struct i915_timeline_cacheline *cl,
+static int cacheline_ref(struct intel_timeline_cacheline *cl,
struct i915_request *rq)
{
- return i915_active_ref(&cl->active, rq->fence.context, rq);
+ return i915_active_ref(&cl->active, rq->timeline, rq);
}
-int i915_timeline_read_hwsp(struct i915_request *from,
- struct i915_request *to,
- u32 *hwsp)
+int intel_timeline_read_hwsp(struct i915_request *from,
+ struct i915_request *to,
+ u32 *hwsp)
{
- struct i915_timeline_cacheline *cl = from->hwsp_cacheline;
- struct i915_timeline *tl = from->timeline;
+ struct intel_timeline_cacheline *cl = from->hwsp_cacheline;
+ struct intel_timeline *tl = from->timeline;
int err;
GEM_BUG_ON(to->timeline == tl);
@@ -535,45 +524,40 @@ int i915_timeline_read_hwsp(struct i915_request *from,
return err;
}
-void i915_timeline_unpin(struct i915_timeline *tl)
+void intel_timeline_unpin(struct intel_timeline *tl)
{
- GEM_BUG_ON(!tl->pin_count);
- if (--tl->pin_count)
+ GEM_BUG_ON(!atomic_read(&tl->pin_count));
+ if (!atomic_dec_and_test(&tl->pin_count))
return;
- timeline_remove_from_active(tl);
cacheline_release(tl->hwsp_cacheline);
- /*
- * Since this timeline is idle, all bariers upon which we were waiting
- * must also be complete and so we can discard the last used barriers
- * without loss of information.
- */
- i915_syncmap_free(&tl->sync);
-
__i915_vma_unpin(tl->hwsp_ggtt);
}
-void __i915_timeline_free(struct kref *kref)
+void __intel_timeline_free(struct kref *kref)
{
- struct i915_timeline *timeline =
+ struct intel_timeline *timeline =
container_of(kref, typeof(*timeline), kref);
- i915_timeline_fini(timeline);
+ intel_timeline_fini(timeline);
kfree(timeline);
}
-void i915_timelines_fini(struct drm_i915_private *i915)
+static void timelines_fini(struct intel_gt *gt)
{
- struct i915_gt_timelines *gt = &i915->gt.timelines;
+ struct intel_gt_timelines *timelines = &gt->timelines;
- GEM_BUG_ON(!list_empty(&gt->active_list));
- GEM_BUG_ON(!list_empty(&gt->hwsp_free_list));
+ GEM_BUG_ON(!list_empty(&timelines->active_list));
+ GEM_BUG_ON(!list_empty(&timelines->hwsp_free_list));
+}
- mutex_destroy(&gt->mutex);
+void intel_timelines_fini(struct drm_i915_private *i915)
+{
+ timelines_fini(&i915->gt);
}
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
-#include "selftests/mock_timeline.c"
-#include "selftests/i915_timeline.c"
+#include "gt/selftests/mock_timeline.c"
+#include "gt/selftest_timeline.c"
#endif
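One subtlety in the cacheline hunks above: i915_active_init() now takes an activation callback in addition to the retirement callback, so the HWSP vma pin is tied to the first active reference instead of being open-coded in cacheline_acquire(). Schematically (only the names shown here come from this diff):

/* Sketch: resource lifetime bound to i915_active activation. */
i915_active_init(hwsp->gt->i915, &cl->active,
		 __cacheline_active,	/* first acquire: pin cl->hwsp->vma */
		 __cacheline_retire);	/* last retire: unpin and free */

i915_active_acquire(&cl->active);	/* implies the pin, no extra bookkeeping */
/* ... track requests against cl->active ... */
i915_active_release(&cl->active);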
diff --git a/drivers/gpu/drm/i915/gt/intel_timeline.h b/drivers/gpu/drm/i915/gt/intel_timeline.h
new file mode 100644
index 000000000000..f583af1ba18d
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_timeline.h
@@ -0,0 +1,94 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#ifndef I915_TIMELINE_H
+#define I915_TIMELINE_H
+
+#include <linux/lockdep.h>
+
+#include "i915_active.h"
+#include "i915_syncmap.h"
+#include "gt/intel_timeline_types.h"
+
+int intel_timeline_init(struct intel_timeline *tl,
+ struct intel_gt *gt,
+ struct i915_vma *hwsp);
+void intel_timeline_fini(struct intel_timeline *tl);
+
+struct intel_timeline *
+intel_timeline_create(struct intel_gt *gt, struct i915_vma *global_hwsp);
+
+static inline struct intel_timeline *
+intel_timeline_get(struct intel_timeline *timeline)
+{
+ kref_get(&timeline->kref);
+ return timeline;
+}
+
+void __intel_timeline_free(struct kref *kref);
+static inline void intel_timeline_put(struct intel_timeline *timeline)
+{
+ kref_put(&timeline->kref, __intel_timeline_free);
+}
+
+static inline int __intel_timeline_sync_set(struct intel_timeline *tl,
+ u64 context, u32 seqno)
+{
+ return i915_syncmap_set(&tl->sync, context, seqno);
+}
+
+static inline int intel_timeline_sync_set(struct intel_timeline *tl,
+ const struct dma_fence *fence)
+{
+ return __intel_timeline_sync_set(tl, fence->context, fence->seqno);
+}
+
+static inline bool __intel_timeline_sync_is_later(struct intel_timeline *tl,
+ u64 context, u32 seqno)
+{
+ return i915_syncmap_is_later(&tl->sync, context, seqno);
+}
+
+static inline bool intel_timeline_sync_is_later(struct intel_timeline *tl,
+ const struct dma_fence *fence)
+{
+ return __intel_timeline_sync_is_later(tl, fence->context, fence->seqno);
+}
+
+int intel_timeline_pin(struct intel_timeline *tl);
+void intel_timeline_enter(struct intel_timeline *tl);
+int intel_timeline_get_seqno(struct intel_timeline *tl,
+ struct i915_request *rq,
+ u32 *seqno);
+void intel_timeline_exit(struct intel_timeline *tl);
+void intel_timeline_unpin(struct intel_timeline *tl);
+
+int intel_timeline_read_hwsp(struct i915_request *from,
+ struct i915_request *until,
+ u32 *hwsp_offset);
+
+void intel_timelines_init(struct drm_i915_private *i915);
+void intel_timelines_fini(struct drm_i915_private *i915);
+
+#endif /* I915_TIMELINE_H */
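The sync helpers above are straight renames of their i915_timeline predecessors. A sketch of the intended deduplication pattern when deciding whether a wait on a foreign fence needs to be emitted at all (the wrapper name is illustrative):

/* Sketch: skip fences this timeline has already ordered itself after. */
static int await_fence_once(struct intel_timeline *tl,
			    const struct dma_fence *fence)
{
	if (intel_timeline_sync_is_later(tl, fence))
		return 0;	/* an equal or later seqno was already waited on */

	/* ... emit the actual wait ... */

	return intel_timeline_sync_set(tl, fence);
}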
diff --git a/drivers/gpu/drm/i915/i915_timeline_types.h b/drivers/gpu/drm/i915/gt/intel_timeline_types.h
index fce5cb4f1090..2b1baf2fcc8e 100644
--- a/drivers/gpu/drm/i915/i915_timeline_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_timeline_types.h
@@ -16,21 +16,39 @@
struct drm_i915_private;
struct i915_vma;
-struct i915_timeline_cacheline;
+struct intel_timeline_cacheline;
struct i915_syncmap;
-struct i915_timeline {
+struct intel_timeline {
u64 fence_context;
u32 seqno;
struct mutex mutex; /* protects the flow of requests */
- unsigned int pin_count;
+ /*
+ * pin_count and active_count track essentially the same thing:
+ * How many requests are in flight or may be under construction.
+ *
+ * We need two distinct counters so that we can assign different
+ * lifetimes to the events for different use-cases. For example,
+ * we want to permanently keep the timeline pinned for the kernel
+ * context so that we can issue requests at any time without having
+ * to acquire space in the GGTT. However, we want to keep tracking
+ * the activity (to be able to detect when we become idle) along that
+ * permanently pinned timeline and so end up requiring two counters.
+ *
+ * Note that the active_count is protected by the intel_timeline.mutex,
+ * but the pin_count is protected by a combination of serialisation
+ * from the intel_context caller plus internal atomicity.
+ */
+ atomic_t pin_count;
+ unsigned int active_count;
+
const u32 *hwsp_seqno;
struct i915_vma *hwsp_ggtt;
u32 hwsp_offset;
- struct i915_timeline_cacheline *hwsp_cacheline;
+ struct intel_timeline_cacheline *hwsp_cacheline;
bool has_initial_breadcrumb;
@@ -59,7 +77,7 @@ struct i915_timeline {
struct i915_syncmap *sync;
struct list_head link;
- struct drm_i915_private *i915;
+ struct intel_gt *gt;
struct kref kref;
};
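
The comment above motivates splitting pin_count from active_count. As a rough standalone sketch (not driver code): pin/unpin are atomic and rely on the caller's serialisation, while enter/exit — mirroring what intel_timeline_enter()/intel_timeline_exit() are documented here to cover with the mutex — drive the idleness decision, so a permanently pinned kernel timeline can still go idle.

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct timeline {
	atomic_int pin_count;		/* caller serialisation + atomics */
	pthread_mutex_t mutex;		/* protects active_count */
	unsigned int active_count;
};

static void tl_pin(struct timeline *tl)   { atomic_fetch_add(&tl->pin_count, 1); }
static void tl_unpin(struct timeline *tl) { atomic_fetch_sub(&tl->pin_count, 1); }

static void tl_enter(struct timeline *tl)
{
	pthread_mutex_lock(&tl->mutex);
	tl->active_count++;		/* a request entered the timeline */
	pthread_mutex_unlock(&tl->mutex);
}

static void tl_exit(struct timeline *tl)
{
	pthread_mutex_lock(&tl->mutex);
	tl->active_count--;
	pthread_mutex_unlock(&tl->mutex);
}

/* idleness is judged on activity alone, never on the pin */
static bool tl_is_idle(struct timeline *tl)
{
	pthread_mutex_lock(&tl->mutex);
	bool idle = tl->active_count == 0;
	pthread_mutex_unlock(&tl->mutex);
	return idle;
}

int main(void)
{
	struct timeline tl = { .mutex = PTHREAD_MUTEX_INITIALIZER };

	tl_pin(&tl);				/* "kernel context": pinned for good */
	tl_enter(&tl);
	printf("idle=%d\n", tl_is_idle(&tl));	/* 0: request in flight */
	tl_exit(&tl);
	printf("idle=%d\n", tl_is_idle(&tl));	/* 1: idle despite the pin */
	tl_unpin(&tl);
	return 0;
}
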
diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c
index 99e8242194c0..45481eb1fa3c 100644
--- a/drivers/gpu/drm/i915/gt/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c
@@ -6,6 +6,7 @@
#include "i915_drv.h"
#include "intel_context.h"
+#include "intel_gt.h"
#include "intel_workarounds.h"
/**
@@ -49,9 +50,10 @@
* - Public functions to init or apply the given workaround type.
*/
-static void wa_init_start(struct i915_wa_list *wal, const char *name)
+static void wa_init_start(struct i915_wa_list *wal,
+			  const char *name, const char *engine_name)
{
wal->name = name;
+ wal->engine_name = engine_name;
}
#define WA_LIST_CHUNK (1 << 4)
@@ -73,8 +75,8 @@ static void wa_init_finish(struct i915_wa_list *wal)
if (!wal->count)
return;
- DRM_DEBUG_DRIVER("Initialized %u %s workarounds\n",
- wal->wa_count, wal->name);
+ DRM_DEBUG_DRIVER("Initialized %u %s workarounds on %s\n",
+ wal->wa_count, wal->name, wal->engine_name);
}
static void _wa_add(struct i915_wa_list *wal, const struct i915_wa *wa)
@@ -175,19 +177,6 @@ wa_write_or(struct i915_wa_list *wal, i915_reg_t reg, u32 val)
wa_write_masked_or(wal, reg, val, val);
}
-static void
-ignore_wa_write_or(struct i915_wa_list *wal, i915_reg_t reg, u32 mask, u32 val)
-{
- struct i915_wa wa = {
- .reg = reg,
- .mask = mask,
- .val = val,
- /* Bonkers HW, skip verifying */
- };
-
- _wa_add(wal, &wa);
-}
-
#define WA_SET_BIT_MASKED(addr, mask) \
wa_write_masked_or(wal, (addr), (mask), _MASKED_BIT_ENABLE(mask))
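
WA_SET_BIT_MASKED() leans on the hardware convention for "masked" registers, where the top 16 bits of a write select which of the low 16 bits take effect; _MASKED_BIT_ENABLE(mask) therefore duplicates the mask into both halves. A small standalone illustration of that packing (the macro bodies are modelled on the driver's, the bit chosen is arbitrary):

#include <stdint.h>
#include <stdio.h>

/* masked registers: bits 31:16 choose which of bits 15:0 are written */
#define MASKED_BIT_ENABLE(a)	(((a) << 16) | (a))
#define MASKED_BIT_DISABLE(a)	((a) << 16)

int main(void)
{
	uint32_t bit = 1u << 5;

	printf("enable : 0x%08x\n", (unsigned)MASKED_BIT_ENABLE(bit));	/* 0x00200020 */
	printf("disable: 0x%08x\n", (unsigned)MASKED_BIT_DISABLE(bit));	/* 0x00200000 */
	return 0;
}
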
@@ -531,12 +520,6 @@ static void icl_ctx_workarounds_init(struct intel_engine_cs *engine,
intel_uncore_read(engine->uncore, GEN8_L3CNTLREG) |
GEN8_ERRDETBCTRL);
- /* WaDisableBankHangMode:icl */
- wa_write(wal,
- GEN8_L3CNTLREG,
- intel_uncore_read(engine->uncore, GEN8_L3CNTLREG) |
- GEN8_ERRDETBCTRL);
-
/* Wa_1604370585:icl (pre-prod)
* Formerly known as WaPushConstantDereferenceHoldDisable
*/
@@ -581,6 +564,11 @@ static void icl_ctx_workarounds_init(struct intel_engine_cs *engine,
GEN11_SAMPLER_ENABLE_HEADLESS_MSG);
}
+static void tgl_ctx_workarounds_init(struct intel_engine_cs *engine,
+ struct i915_wa_list *wal)
+{
+}
+
static void
__intel_engine_init_ctx_wa(struct intel_engine_cs *engine,
struct i915_wa_list *wal,
@@ -591,9 +579,11 @@ __intel_engine_init_ctx_wa(struct intel_engine_cs *engine,
if (engine->class != RENDER_CLASS)
return;
- wa_init_start(wal, name);
+ wa_init_start(wal, name, engine->name);
- if (IS_GEN(i915, 11))
+ if (IS_GEN(i915, 12))
+ tgl_ctx_workarounds_init(engine, wal);
+ else if (IS_GEN(i915, 11))
icl_ctx_workarounds_init(engine, wal);
else if (IS_CANNONLAKE(i915))
cnl_ctx_workarounds_init(engine, wal);
@@ -761,7 +751,10 @@ static void
wa_init_mcr(struct drm_i915_private *i915, struct i915_wa_list *wal)
{
const struct sseu_dev_info *sseu = &RUNTIME_INFO(i915)->sseu;
- u32 mcr_slice_subslice_mask;
+ unsigned int slice, subslice;
+ u32 l3_en, mcr, mcr_mask;
+
+ GEM_BUG_ON(INTEL_GEN(i915) < 10);
/*
* WaProgramMgsrForL3BankSpecificMmioReads: cnl,icl
@@ -769,42 +762,7 @@ wa_init_mcr(struct drm_i915_private *i915, struct i915_wa_list *wal)
* the case, we might need to program MCR select to a valid L3Bank
* by default, to make sure we correctly read certain registers
* later on (in the range 0xB100 - 0xB3FF).
- * This might be incompatible with
- * WaProgramMgsrForCorrectSliceSpecificMmioReads.
- * Fortunately, this should not happen in production hardware, so
- * we only assert that this is the case (instead of implementing
- * something more complex that requires checking the range of every
- * MMIO read).
- */
- if (INTEL_GEN(i915) >= 10 &&
- is_power_of_2(sseu->slice_mask)) {
- /*
- * read FUSE3 for enabled L3 Bank IDs, if L3 Bank matches
- * enabled subslice, no need to redirect MCR packet
- */
- u32 slice = fls(sseu->slice_mask);
- u32 fuse3 =
- intel_uncore_read(&i915->uncore, GEN10_MIRROR_FUSE3);
- u8 ss_mask = sseu->subslice_mask[slice];
-
- u8 enabled_mask = (ss_mask | ss_mask >>
- GEN10_L3BANK_PAIR_COUNT) & GEN10_L3BANK_MASK;
- u8 disabled_mask = fuse3 & GEN10_L3BANK_MASK;
-
- /*
- * Production silicon should have matched L3Bank and
- * subslice enabled
- */
- WARN_ON((enabled_mask & disabled_mask) != enabled_mask);
- }
-
- if (INTEL_GEN(i915) >= 11)
- mcr_slice_subslice_mask = GEN11_MCR_SLICE_MASK |
- GEN11_MCR_SUBSLICE_MASK;
- else
- mcr_slice_subslice_mask = GEN8_MCR_SLICE_MASK |
- GEN8_MCR_SUBSLICE_MASK;
- /*
+ *
* WaProgramMgsrForCorrectSliceSpecificMmioReads:cnl,icl
* Before any MMIO read into slice/subslice specific registers, MCR
* packet control register needs to be programmed to point to any
@@ -814,11 +772,51 @@ wa_init_mcr(struct drm_i915_private *i915, struct i915_wa_list *wal)
* are consistent across s/ss in almost all cases. In the rare
* occasions, such as INSTDONE, where this value is dependent
* on s/ss combo, the read should be done with read_subslice_reg.
+ *
+ * Since GEN8_MCR_SELECTOR contains dual-purpose bits that select both
+ * the subslice and the L3 bank to which the respective mmio reads will
+ * be routed, we have to find a common index that works for both
+ * accesses.
+ *
+ * The case where we cannot find a common index should fortunately not
+ * happen in production hardware, so we only emit a warning instead of
+ * implementing something more complex that would require checking the
+ * range of every MMIO read.
*/
- wa_write_masked_or(wal,
- GEN8_MCR_SELECTOR,
- mcr_slice_subslice_mask,
- intel_calculate_mcr_s_ss_select(i915));
+
+ if (INTEL_GEN(i915) >= 10 && is_power_of_2(sseu->slice_mask)) {
+ u32 l3_fuse =
+ intel_uncore_read(&i915->uncore, GEN10_MIRROR_FUSE3) &
+ GEN10_L3BANK_MASK;
+
+ DRM_DEBUG_DRIVER("L3 fuse = %x\n", l3_fuse);
+ l3_en = ~(l3_fuse << GEN10_L3BANK_PAIR_COUNT | l3_fuse);
+ } else {
+ l3_en = ~0;
+ }
+
+ slice = fls(sseu->slice_mask) - 1;
+ GEM_BUG_ON(slice >= ARRAY_SIZE(sseu->subslice_mask));
+ subslice = fls(l3_en & sseu->subslice_mask[slice]);
+ if (!subslice) {
+ DRM_WARN("No common index found between subslice mask %x and L3 bank mask %x!\n",
+ sseu->subslice_mask[slice], l3_en);
+ subslice = fls(l3_en);
+ WARN_ON(!subslice);
+ }
+ subslice--;
+
+ if (INTEL_GEN(i915) >= 11) {
+ mcr = GEN11_MCR_SLICE(slice) | GEN11_MCR_SUBSLICE(subslice);
+ mcr_mask = GEN11_MCR_SLICE_MASK | GEN11_MCR_SUBSLICE_MASK;
+ } else {
+ mcr = GEN8_MCR_SLICE(slice) | GEN8_MCR_SUBSLICE(subslice);
+ mcr_mask = GEN8_MCR_SLICE_MASK | GEN8_MCR_SUBSLICE_MASK;
+ }
+
+ DRM_DEBUG_DRIVER("MCR slice/subslice = %x\n", mcr);
+
+ wa_write_masked_or(wal, GEN8_MCR_SELECTOR, mcr_mask, mcr);
}
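
To see the selection above in action, here is a worked standalone example with made-up fuse data; fls32() stands in for the kernel's fls() (1-based index of the highest set bit) and none of the constants correspond to real hardware:

#include <stdint.h>
#include <stdio.h>

static int fls32(uint32_t x)
{
	return x ? 32 - __builtin_clz(x) : 0;
}

int main(void)
{
	uint32_t slice_mask = 0x1;	/* single slice, as is_power_of_2() expects */
	uint32_t subslice_mask = 0xf;	/* subslices 0-3 present */
	uint32_t l3_fuse = 0xa;		/* pretend banks 1 and 3 are fused off */
	uint32_t l3_en = ~(l3_fuse << 2 | l3_fuse);	/* toy pair count of 2 */

	int slice = fls32(slice_mask) - 1;
	int subslice = fls32(l3_en & subslice_mask);

	if (!subslice)			/* no common index: warn and fall back */
		subslice = fls32(l3_en);
	subslice--;

	printf("slice=%d subslice=%d\n", slice, subslice);	/* slice=0 subslice=3 */
	return 0;
}
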
static void
@@ -895,9 +893,16 @@ icl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
}
static void
+tgl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
+{
+}
+
+static void
gt_init_workarounds(struct drm_i915_private *i915, struct i915_wa_list *wal)
{
- if (IS_GEN(i915, 11))
+ if (IS_GEN(i915, 12))
+ tgl_gt_workarounds_init(i915, wal);
+ else if (IS_GEN(i915, 11))
icl_gt_workarounds_init(i915, wal);
else if (IS_CANNONLAKE(i915))
cnl_gt_workarounds_init(i915, wal);
@@ -921,7 +926,7 @@ void intel_gt_init_workarounds(struct drm_i915_private *i915)
{
struct i915_wa_list *wal = &i915->gt_wa_list;
- wa_init_start(wal, "GT");
+ wa_init_start(wal, "GT", "global");
gt_init_workarounds(i915, wal);
wa_init_finish(wal);
}
@@ -985,9 +990,9 @@ wa_list_apply(struct intel_uncore *uncore, const struct i915_wa_list *wal)
spin_unlock_irqrestore(&uncore->lock, flags);
}
-void intel_gt_apply_workarounds(struct drm_i915_private *i915)
+void intel_gt_apply_workarounds(struct intel_gt *gt)
{
- wa_list_apply(&i915->uncore, &i915->gt_wa_list);
+ wa_list_apply(gt->uncore, &gt->i915->gt_wa_list);
}
static bool wa_list_verify(struct intel_uncore *uncore,
@@ -1006,10 +1011,23 @@ static bool wa_list_verify(struct intel_uncore *uncore,
return ok;
}
-bool intel_gt_verify_workarounds(struct drm_i915_private *i915,
- const char *from)
+bool intel_gt_verify_workarounds(struct intel_gt *gt, const char *from)
{
- return wa_list_verify(&i915->uncore, &i915->gt_wa_list, from);
+ return wa_list_verify(gt->uncore, &gt->i915->gt_wa_list, from);
+}
+
+static inline bool is_nonpriv_flags_valid(u32 flags)
+{
+ /* Check only valid flag bits are set */
+ if (flags & ~RING_FORCE_TO_NONPRIV_MASK_VALID)
+ return false;
+
+ /* NB: Only 3 out of 4 enum values are valid for access field */
+ if ((flags & RING_FORCE_TO_NONPRIV_ACCESS_MASK) ==
+ RING_FORCE_TO_NONPRIV_ACCESS_INVALID)
+ return false;
+
+ return true;
}
static void
@@ -1022,6 +1040,9 @@ whitelist_reg_ext(struct i915_wa_list *wal, i915_reg_t reg, u32 flags)
if (GEM_DEBUG_WARN_ON(wal->count >= RING_MAX_NONPRIV_SLOTS))
return;
+ if (GEM_DEBUG_WARN_ON(!is_nonpriv_flags_valid(flags)))
+ return;
+
wa.reg.reg |= flags;
_wa_add(wal, &wa);
}
@@ -1029,7 +1050,7 @@ whitelist_reg_ext(struct i915_wa_list *wal, i915_reg_t reg, u32 flags)
static void
whitelist_reg(struct i915_wa_list *wal, i915_reg_t reg)
{
- whitelist_reg_ext(wal, reg, RING_FORCE_TO_NONPRIV_RW);
+ whitelist_reg_ext(wal, reg, RING_FORCE_TO_NONPRIV_ACCESS_RW);
}
static void gen9_whitelist_build(struct i915_wa_list *w)
@@ -1110,7 +1131,7 @@ static void cfl_whitelist_build(struct intel_engine_cs *engine)
* - PS_DEPTH_COUNT_UDW
*/
whitelist_reg_ext(w, PS_INVOCATION_COUNT,
- RING_FORCE_TO_NONPRIV_RD |
+ RING_FORCE_TO_NONPRIV_ACCESS_RD |
RING_FORCE_TO_NONPRIV_RANGE_4);
}
@@ -1150,20 +1171,20 @@ static void icl_whitelist_build(struct intel_engine_cs *engine)
* - PS_DEPTH_COUNT_UDW
*/
whitelist_reg_ext(w, PS_INVOCATION_COUNT,
- RING_FORCE_TO_NONPRIV_RD |
+ RING_FORCE_TO_NONPRIV_ACCESS_RD |
RING_FORCE_TO_NONPRIV_RANGE_4);
break;
case VIDEO_DECODE_CLASS:
/* hucStatusRegOffset */
whitelist_reg_ext(w, _MMIO(0x2000 + engine->mmio_base),
- RING_FORCE_TO_NONPRIV_RD);
+ RING_FORCE_TO_NONPRIV_ACCESS_RD);
/* hucUKernelHdrInfoRegOffset */
whitelist_reg_ext(w, _MMIO(0x2014 + engine->mmio_base),
- RING_FORCE_TO_NONPRIV_RD);
+ RING_FORCE_TO_NONPRIV_ACCESS_RD);
/* hucStatus2RegOffset */
whitelist_reg_ext(w, _MMIO(0x23B0 + engine->mmio_base),
- RING_FORCE_TO_NONPRIV_RD);
+ RING_FORCE_TO_NONPRIV_ACCESS_RD);
break;
default:
@@ -1171,14 +1192,20 @@ static void icl_whitelist_build(struct intel_engine_cs *engine)
}
}
+static void tgl_whitelist_build(struct intel_engine_cs *engine)
+{
+}
+
void intel_engine_init_whitelist(struct intel_engine_cs *engine)
{
struct drm_i915_private *i915 = engine->i915;
struct i915_wa_list *w = &engine->whitelist;
- wa_init_start(w, "whitelist");
+ wa_init_start(w, "whitelist", engine->name);
- if (IS_GEN(i915, 11))
+ if (IS_GEN(i915, 12))
+ tgl_whitelist_build(engine);
+ else if (IS_GEN(i915, 11))
icl_whitelist_build(engine);
else if (IS_CANNONLAKE(i915))
cnl_whitelist_build(engine);
@@ -1235,10 +1262,9 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
_3D_CHICKEN3_AA_LINE_QUALITY_FIX_ENABLE);
/* WaPipelineFlushCoherentLines:icl */
- ignore_wa_write_or(wal,
- GEN8_L3SQCREG4,
- GEN8_LQSC_FLUSH_COHERENT_LINES,
- GEN8_LQSC_FLUSH_COHERENT_LINES);
+ wa_write_or(wal,
+ GEN8_L3SQCREG4,
+ GEN8_LQSC_FLUSH_COHERENT_LINES);
/*
* Wa_1405543622:icl
@@ -1265,10 +1291,9 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
* Wa_1405733216:icl
* Formerly known as WaDisableCleanEvicts
*/
- ignore_wa_write_or(wal,
- GEN8_L3SQCREG4,
- GEN11_LQSC_CLEAN_EVICT_DISABLE,
- GEN11_LQSC_CLEAN_EVICT_DISABLE);
+ wa_write_or(wal,
+ GEN8_L3SQCREG4,
+ GEN11_LQSC_CLEAN_EVICT_DISABLE);
/* WaForwardProgressSoftReset:icl */
wa_write_or(wal,
@@ -1287,6 +1312,12 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
wa_write_or(wal,
GEN7_SARCHKMD,
GEN7_DISABLE_SAMPLER_PREFETCH);
+
+ /* Wa_1409178092:icl */
+ wa_write_masked_or(wal,
+ GEN11_SCRATCH2,
+ GEN11_COHERENT_PARTIAL_WRITE_MERGE_ENABLE,
+ 0);
}
if (IS_GEN_RANGE(i915, 9, 11)) {
@@ -1355,7 +1386,7 @@ engine_init_workarounds(struct intel_engine_cs *engine, struct i915_wa_list *wal
if (I915_SELFTEST_ONLY(INTEL_GEN(engine->i915) < 8))
return;
- if (engine->id == RCS0)
+ if (engine->class == RENDER_CLASS)
rcs_engine_wa_init(engine, wal);
else
xcs_engine_wa_init(engine, wal);
@@ -1365,10 +1396,10 @@ void intel_engine_init_workarounds(struct intel_engine_cs *engine)
{
struct i915_wa_list *wal = &engine->wa_list;
- if (GEM_WARN_ON(INTEL_GEN(engine->i915) < 8))
+ if (INTEL_GEN(engine->i915) < 8)
return;
- wa_init_start(wal, engine->name);
+ wa_init_start(wal, "engine", engine->name);
engine_init_workarounds(engine, wal);
wa_init_finish(wal);
}
@@ -1411,26 +1442,50 @@ err_obj:
return ERR_PTR(err);
}
+static bool mcr_range(struct drm_i915_private *i915, u32 offset)
+{
+ /*
+ * Registers in this range are affected by the MCR selector
+ * which only controls CPU initiated MMIO. Routing does not
+ * work for CS access so we cannot verify them on this path.
+ */
+ if (INTEL_GEN(i915) >= 8 && (offset >= 0xb100 && offset <= 0xb3ff))
+ return true;
+
+ return false;
+}
+
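
Because MCR-routed offsets are skipped, the SRM emission below has to size its ring allocation with a first counting pass and then filter with the same predicate, so the space reserved and the dwords written always agree. A standalone sketch of that count-then-emit pattern follows (toy offsets only; note the kernel keeps each result slot indexed by the original list position, so the verify pass can skip the very same entries later):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct wa { uint32_t reg; };

static bool mcr_range(uint32_t offset)
{
	return offset >= 0xb100 && offset <= 0xb3ff;
}

int main(void)
{
	struct wa list[] = { { 0x7004 }, { 0xb118 }, { 0xe180 } };
	size_t n = sizeof(list) / sizeof(list[0]);
	size_t i, count = 0;

	for (i = 0; i < n; i++)		/* pass 1: size the buffer */
		if (!mcr_range(list[i].reg))
			count++;

	uint32_t *cs = calloc(4 * count, sizeof(*cs)), *p = cs;
	if (!cs)
		return 1;

	for (i = 0; i < n; i++) {	/* pass 2: emit with the same filter */
		if (mcr_range(list[i].reg))
			continue;
		*p++ = 0;		/* SRM opcode would go here */
		*p++ = list[i].reg;
		*p++ = 4 * (uint32_t)i;	/* result slot keyed by original index */
		*p++ = 0;
	}

	printf("emitted %zu dwords for %zu registers\n", (size_t)(p - cs), count);
	free(cs);
	return 0;
}
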
static int
wa_list_srm(struct i915_request *rq,
const struct i915_wa_list *wal,
struct i915_vma *vma)
{
+ struct drm_i915_private *i915 = rq->i915;
+ unsigned int i, count = 0;
const struct i915_wa *wa;
- unsigned int i;
u32 srm, *cs;
srm = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
- if (INTEL_GEN(rq->i915) >= 8)
+ if (INTEL_GEN(i915) >= 8)
srm++;
- cs = intel_ring_begin(rq, 4 * wal->count);
+ for (i = 0, wa = wal->list; i < wal->count; i++, wa++) {
+ if (!mcr_range(i915, i915_mmio_reg_offset(wa->reg)))
+ count++;
+ }
+
+ cs = intel_ring_begin(rq, 4 * count);
if (IS_ERR(cs))
return PTR_ERR(cs);
for (i = 0, wa = wal->list; i < wal->count; i++, wa++) {
+ u32 offset = i915_mmio_reg_offset(wa->reg);
+
+ if (mcr_range(i915, offset))
+ continue;
+
*cs++ = srm;
- *cs++ = i915_mmio_reg_offset(wa->reg);
+ *cs++ = offset;
*cs++ = i915_ggtt_offset(vma) + sizeof(u32) * i;
*cs++ = 0;
}
@@ -1453,7 +1508,7 @@ static int engine_wa_list_verify(struct intel_context *ce,
if (!wal->count)
return 0;
- vma = create_scratch(&ce->engine->i915->ggtt.vm, wal->count);
+ vma = create_scratch(&ce->engine->gt->ggtt->vm, wal->count);
if (IS_ERR(vma))
return PTR_ERR(vma);
@@ -1480,9 +1535,13 @@ static int engine_wa_list_verify(struct intel_context *ce,
}
err = 0;
- for (i = 0, wa = wal->list; i < wal->count; i++, wa++)
+ for (i = 0, wa = wal->list; i < wal->count; i++, wa++) {
+ if (mcr_range(rq->i915, i915_mmio_reg_offset(wa->reg)))
+ continue;
+
if (!wa_verify(wa, results[i], wal->name, from))
err = -ENXIO;
+ }
i915_gem_object_unpin_map(vma->obj);
diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.h b/drivers/gpu/drm/i915/gt/intel_workarounds.h
index 3761a6ee58bb..8c9c769c2204 100644
--- a/drivers/gpu/drm/i915/gt/intel_workarounds.h
+++ b/drivers/gpu/drm/i915/gt/intel_workarounds.h
@@ -14,6 +14,7 @@
struct drm_i915_private;
struct i915_request;
struct intel_engine_cs;
+struct intel_gt;
static inline void intel_wa_list_free(struct i915_wa_list *wal)
{
@@ -25,9 +26,8 @@ void intel_engine_init_ctx_wa(struct intel_engine_cs *engine);
int intel_engine_emit_ctx_wa(struct i915_request *rq);
void intel_gt_init_workarounds(struct drm_i915_private *i915);
-void intel_gt_apply_workarounds(struct drm_i915_private *i915);
-bool intel_gt_verify_workarounds(struct drm_i915_private *i915,
- const char *from);
+void intel_gt_apply_workarounds(struct intel_gt *gt);
+bool intel_gt_verify_workarounds(struct intel_gt *gt, const char *from);
void intel_engine_init_whitelist(struct intel_engine_cs *engine);
void intel_engine_apply_whitelist(struct intel_engine_cs *engine);
diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds_types.h b/drivers/gpu/drm/i915/gt/intel_workarounds_types.h
index 42ac1fb99572..e27ab1b710b3 100644
--- a/drivers/gpu/drm/i915/gt/intel_workarounds_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_workarounds_types.h
@@ -20,6 +20,7 @@ struct i915_wa {
struct i915_wa_list {
const char *name;
+ const char *engine_name;
struct i915_wa *list;
unsigned int count;
unsigned int wa_count;
diff --git a/drivers/gpu/drm/i915/gt/mock_engine.c b/drivers/gpu/drm/i915/gt/mock_engine.c
index 486c6953dcb1..5d43cbc3f345 100644
--- a/drivers/gpu/drm/i915/gt/mock_engine.c
+++ b/drivers/gpu/drm/i915/gt/mock_engine.c
@@ -27,59 +27,40 @@
#include "i915_drv.h"
#include "intel_context.h"
#include "intel_engine_pm.h"
+#include "intel_engine_pool.h"
#include "mock_engine.h"
#include "selftests/mock_request.h"
-struct mock_ring {
- struct intel_ring base;
- struct i915_timeline timeline;
-};
-
-static void mock_timeline_pin(struct i915_timeline *tl)
+static void mock_timeline_pin(struct intel_timeline *tl)
{
- tl->pin_count++;
+ atomic_inc(&tl->pin_count);
}
-static void mock_timeline_unpin(struct i915_timeline *tl)
+static void mock_timeline_unpin(struct intel_timeline *tl)
{
- GEM_BUG_ON(!tl->pin_count);
- tl->pin_count--;
+ GEM_BUG_ON(!atomic_read(&tl->pin_count));
+ atomic_dec(&tl->pin_count);
}
static struct intel_ring *mock_ring(struct intel_engine_cs *engine)
{
const unsigned long sz = PAGE_SIZE / 2;
- struct mock_ring *ring;
+ struct intel_ring *ring;
ring = kzalloc(sizeof(*ring) + sz, GFP_KERNEL);
if (!ring)
return NULL;
- if (i915_timeline_init(engine->i915, &ring->timeline, NULL)) {
- kfree(ring);
- return NULL;
- }
-
- kref_init(&ring->base.ref);
- ring->base.size = sz;
- ring->base.effective_size = sz;
- ring->base.vaddr = (void *)(ring + 1);
- ring->base.timeline = &ring->timeline;
- atomic_set(&ring->base.pin_count, 1);
+ kref_init(&ring->ref);
+ ring->size = sz;
+ ring->effective_size = sz;
+ ring->vaddr = (void *)(ring + 1);
+ atomic_set(&ring->pin_count, 1);
- INIT_LIST_HEAD(&ring->base.request_list);
- intel_ring_update_space(&ring->base);
+ intel_ring_update_space(ring);
- return &ring->base;
-}
-
-static void mock_ring_free(struct intel_ring *base)
-{
- struct mock_ring *ring = container_of(base, typeof(*ring), base);
-
- i915_timeline_fini(&ring->timeline);
- kfree(ring);
+ return ring;
}
static struct i915_request *first_request(struct mock_engine *engine)
@@ -130,7 +111,6 @@ static void hw_delay_complete(struct timer_list *t)
static void mock_context_unpin(struct intel_context *ce)
{
- mock_timeline_unpin(ce->ring->timeline);
}
static void mock_context_destroy(struct kref *ref)
@@ -139,31 +119,41 @@ static void mock_context_destroy(struct kref *ref)
GEM_BUG_ON(intel_context_is_pinned(ce));
- if (ce->ring)
- mock_ring_free(ce->ring);
+ if (test_bit(CONTEXT_ALLOC_BIT, &ce->flags)) {
+ kfree(ce->ring);
+ mock_timeline_unpin(ce->timeline);
+ }
+ intel_context_fini(ce);
intel_context_free(ce);
}
-static int mock_context_pin(struct intel_context *ce)
+static int mock_context_alloc(struct intel_context *ce)
{
- int ret;
-
- if (!ce->ring) {
- ce->ring = mock_ring(ce->engine);
- if (!ce->ring)
- return -ENOMEM;
+ ce->ring = mock_ring(ce->engine);
+ if (!ce->ring)
+ return -ENOMEM;
+
+ GEM_BUG_ON(ce->timeline);
+ ce->timeline = intel_timeline_create(ce->engine->gt, NULL);
+ if (IS_ERR(ce->timeline)) {
+ kfree(ce->ring);
+ return PTR_ERR(ce->timeline);
}
- ret = intel_context_active_acquire(ce, PIN_HIGH);
- if (ret)
- return ret;
+ mock_timeline_pin(ce->timeline);
- mock_timeline_pin(ce->ring->timeline);
return 0;
}
+static int mock_context_pin(struct intel_context *ce)
+{
+ return intel_context_active_acquire(ce);
+}
+
static const struct intel_context_ops mock_context_ops = {
+ .alloc = mock_context_alloc,
+
.pin = mock_context_pin,
.unpin = mock_context_unpin,
@@ -257,9 +247,11 @@ struct intel_engine_cs *mock_engine(struct drm_i915_private *i915,
/* minimal engine setup for requests */
engine->base.i915 = i915;
+ engine->base.gt = &i915->gt;
snprintf(engine->base.name, sizeof(engine->base.name), "%s", name);
engine->base.id = id;
engine->base.mask = BIT(id);
+ engine->base.instance = id;
engine->base.status_page.addr = (void *)(engine + 1);
engine->base.cops = &mock_context_ops;
@@ -278,29 +270,26 @@ struct intel_engine_cs *mock_engine(struct drm_i915_private *i915,
timer_setup(&engine->hw_delay, hw_delay_complete, 0);
INIT_LIST_HEAD(&engine->hw_queue);
+ intel_engine_add_user(&engine->base);
+
return &engine->base;
}
int mock_engine_init(struct intel_engine_cs *engine)
{
- struct drm_i915_private *i915 = engine->i915;
- int err;
+ struct intel_context *ce;
intel_engine_init_active(engine, ENGINE_MOCK);
intel_engine_init_breadcrumbs(engine);
intel_engine_init_execlists(engine);
intel_engine_init__pm(engine);
+ intel_engine_pool_init(&engine->pool);
- engine->kernel_context =
- i915_gem_context_get_engine(i915->kernel_context, engine->id);
- if (IS_ERR(engine->kernel_context))
- goto err_breadcrumbs;
-
- err = intel_context_pin(engine->kernel_context);
- intel_context_put(engine->kernel_context);
- if (err)
+ ce = create_kernel_context(engine);
+ if (IS_ERR(ce))
goto err_breadcrumbs;
+ engine->kernel_context = ce;
return 0;
err_breadcrumbs:
@@ -334,6 +323,7 @@ void mock_engine_free(struct intel_engine_cs *engine)
GEM_BUG_ON(timer_pending(&mock->hw_delay));
intel_context_unpin(engine->kernel_context);
+ intel_context_put(engine->kernel_context);
intel_engine_fini_breadcrumbs(engine);
diff --git a/drivers/gpu/drm/i915/gt/selftest_context.c b/drivers/gpu/drm/i915/gt/selftest_context.c
new file mode 100644
index 000000000000..9d1ea26c7a2d
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/selftest_context.c
@@ -0,0 +1,456 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include "i915_selftest.h"
+#include "intel_engine_pm.h"
+#include "intel_gt.h"
+
+#include "gem/selftests/mock_context.h"
+#include "selftests/igt_flush_test.h"
+#include "selftests/mock_drm.h"
+
+static int request_sync(struct i915_request *rq)
+{
+ long timeout;
+ int err = 0;
+
+ i915_request_get(rq);
+
+ i915_request_add(rq);
+ timeout = i915_request_wait(rq, 0, HZ / 10);
+ if (timeout < 0) {
+ err = timeout;
+ } else {
+ mutex_lock(&rq->timeline->mutex);
+ i915_request_retire_upto(rq);
+ mutex_unlock(&rq->timeline->mutex);
+ }
+
+ i915_request_put(rq);
+
+ return err;
+}
+
+static int context_sync(struct intel_context *ce)
+{
+ struct intel_timeline *tl = ce->timeline;
+ int err = 0;
+
+ mutex_lock(&tl->mutex);
+ do {
+ struct i915_request *rq;
+ long timeout;
+
+ rcu_read_lock();
+ rq = rcu_dereference(tl->last_request.request);
+ if (rq)
+ rq = i915_request_get_rcu(rq);
+ rcu_read_unlock();
+ if (!rq)
+ break;
+
+ timeout = i915_request_wait(rq, 0, HZ / 10);
+ if (timeout < 0)
+ err = timeout;
+ else
+ i915_request_retire_upto(rq);
+
+ i915_request_put(rq);
+ } while (!err);
+ mutex_unlock(&tl->mutex);
+
+ return err;
+}
+
+static int __live_context_size(struct intel_engine_cs *engine,
+ struct i915_gem_context *fixme)
+{
+ struct intel_context *ce;
+ struct i915_request *rq;
+ void *vaddr;
+ int err;
+
+ ce = intel_context_create(fixme, engine);
+ if (IS_ERR(ce))
+ return PTR_ERR(ce);
+
+ err = intel_context_pin(ce);
+ if (err)
+ goto err;
+
+ vaddr = i915_gem_object_pin_map(ce->state->obj,
+ i915_coherent_map_type(engine->i915));
+ if (IS_ERR(vaddr)) {
+ err = PTR_ERR(vaddr);
+ intel_context_unpin(ce);
+ goto err;
+ }
+
+ /*
+ * Note that execlists also applies a redzone which it checks on
+ * context unpin when debugging. We are using the same location
+ * and same poison value so that our checks overlap. Despite the
+ * redundancy, we want to keep this little selftest so that we
+ * get coverage of any and all submission backends, and we can
+ * always extend this test to ensure we trick the HW into a
+ * compromising position with respect to the various sections that need
+ * to be written into the context state.
+ *
+ * TL;DR: this overlaps with the execlists redzone.
+ */
+ if (HAS_EXECLISTS(engine->i915))
+ vaddr += LRC_HEADER_PAGES * PAGE_SIZE;
+
+ vaddr += engine->context_size - I915_GTT_PAGE_SIZE;
+ memset(vaddr, POISON_INUSE, I915_GTT_PAGE_SIZE);
+
+ rq = intel_context_create_request(ce);
+ intel_context_unpin(ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto err_unpin;
+ }
+
+ err = request_sync(rq);
+ if (err)
+ goto err_unpin;
+
+ /* Force the context switch */
+ rq = i915_request_create(engine->kernel_context);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto err_unpin;
+ }
+ err = request_sync(rq);
+ if (err)
+ goto err_unpin;
+
+ if (memchr_inv(vaddr, POISON_INUSE, I915_GTT_PAGE_SIZE)) {
+ pr_err("%s context overwrote trailing red-zone!", engine->name);
+ err = -EINVAL;
+ }
+
+err_unpin:
+ i915_gem_object_unpin_map(ce->state->obj);
+err:
+ intel_context_put(ce);
+ return err;
+}
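
The red-zone trick used by __live_context_size() reduces to: poison the page past the claimed context size, let the hardware perform a context save, then verify the poison survived. A userspace-runnable miniature of the same check (sizes and the poison byte are arbitrary; memchr_inv() is modelled with a plain loop):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define CTX_SIZE 64
#define REDZONE  16
#define POISON   0x5a

int main(void)
{
	uint8_t state[CTX_SIZE + REDZONE];

	memset(state + CTX_SIZE, POISON, REDZONE);	/* paint the redzone */

	memset(state, 0, CTX_SIZE);	/* a well-behaved "context save" */
	/* memset(state, 0, CTX_SIZE + 1) would be an overrun and trip below */

	for (int i = 0; i < REDZONE; i++) {
		if (state[CTX_SIZE + i] != POISON) {
			fprintf(stderr, "context overwrote trailing red-zone!\n");
			return 1;
		}
	}
	printf("redzone intact\n");
	return 0;
}
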
+
+static int live_context_size(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ struct i915_gem_context *fixme;
+ enum intel_engine_id id;
+ int err = 0;
+
+ /*
+ * Check that our context sizes are correct by seeing if the
+ * HW tries to write past the end of one.
+ */
+
+ mutex_lock(&gt->i915->drm.struct_mutex);
+
+ fixme = kernel_context(gt->i915);
+ if (IS_ERR(fixme)) {
+ err = PTR_ERR(fixme);
+ goto unlock;
+ }
+
+ for_each_engine(engine, gt->i915, id) {
+ struct {
+ struct drm_i915_gem_object *state;
+ void *pinned;
+ } saved;
+
+ if (!engine->context_size)
+ continue;
+
+ intel_engine_pm_get(engine);
+
+ /*
+ * Hide the old default state -- we lie about the context size
+ * and get confused when the default state is smaller than
+ * expected. For our do-nothing request, inheriting the
+ * active state is sufficient; we are only checking that we
+ * don't use more than we planned.
+ */
+ saved.state = fetch_and_zero(&engine->default_state);
+ saved.pinned = fetch_and_zero(&engine->pinned_default_state);
+
+ /* Overlaps with the execlists redzone */
+ engine->context_size += I915_GTT_PAGE_SIZE;
+
+ err = __live_context_size(engine, fixme);
+
+ engine->context_size -= I915_GTT_PAGE_SIZE;
+
+ engine->pinned_default_state = saved.pinned;
+ engine->default_state = saved.state;
+
+ intel_engine_pm_put(engine);
+
+ if (err)
+ break;
+ }
+
+ kernel_context_close(fixme);
+unlock:
+ mutex_unlock(&gt->i915->drm.struct_mutex);
+ return err;
+}
+
+static int __live_active_context(struct intel_engine_cs *engine,
+ struct i915_gem_context *fixme)
+{
+ struct intel_context *ce;
+ int pass;
+ int err;
+
+ /*
+ * We keep active contexts alive until after a subsequent context
+ * switch as the final write from the context-save will be after
+ * we retire the final request. We track when we unpin the context,
+ * under the presumption that the final pin is from the last request,
+ * and instead of immediately unpinning the context, we add a task
+ * to unpin the context from the next idle-barrier.
+ *
+ * This test makes sure that the context is kept alive until a
+ * subsequent idle-barrier (emitted when the engine wakeref hits 0
+ * with no more outstanding requests).
+ */
+
+ if (intel_engine_pm_is_awake(engine)) {
+ pr_err("%s is awake before starting %s!\n",
+ engine->name, __func__);
+ return -EINVAL;
+ }
+
+ ce = intel_context_create(fixme, engine);
+ if (IS_ERR(ce))
+ return PTR_ERR(ce);
+
+ for (pass = 0; pass <= 2; pass++) {
+ struct i915_request *rq;
+
+ rq = intel_context_create_request(ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto err;
+ }
+
+ err = request_sync(rq);
+ if (err)
+ goto err;
+
+ /* Context will be kept active until after an idle-barrier. */
+ if (i915_active_is_idle(&ce->active)) {
+ pr_err("context is not active; expected idle-barrier (%s pass %d)\n",
+ engine->name, pass);
+ err = -EINVAL;
+ goto err;
+ }
+
+ if (!intel_engine_pm_is_awake(engine)) {
+ pr_err("%s is asleep before idle-barrier\n",
+ engine->name);
+ err = -EINVAL;
+ goto err;
+ }
+ }
+
+ /* Now make sure our idle-barriers are flushed */
+ err = context_sync(engine->kernel_context);
+ if (err)
+ goto err;
+
+ if (!i915_active_is_idle(&ce->active)) {
+ pr_err("context is still active!");
+ err = -EINVAL;
+ }
+
+ if (intel_engine_pm_is_awake(engine)) {
+ struct drm_printer p = drm_debug_printer(__func__);
+
+ intel_engine_dump(engine, &p,
+ "%s is still awake after idle-barriers\n",
+ engine->name);
+ GEM_TRACE_DUMP();
+
+ err = -EINVAL;
+ goto err;
+ }
+
+err:
+ intel_context_put(ce);
+ return err;
+}
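
The deferred-release rule this test pins down — the final unpin does not free the context immediately, it parks it on a barrier that is only reaped at the next engine idle point — can be modelled in a few lines. Everything below (the names, the fixed-size list) is illustrative only:

#include <stdbool.h>
#include <stdio.h>

struct ctx { bool parked; bool freed; };

static struct ctx *barrier_list[4];
static int nbarriers;

static void ctx_unpin(struct ctx *c)
{
	c->parked = true;			/* defer: do not free yet */
	barrier_list[nbarriers++] = c;
}

static void engine_idle_barrier(void)		/* wakeref hits zero */
{
	for (int i = 0; i < nbarriers; i++)
		barrier_list[i]->freed = true;	/* the real final release */
	nbarriers = 0;
}

int main(void)
{
	struct ctx c = { 0 };

	ctx_unpin(&c);
	printf("after unpin: freed=%d\n", c.freed);	/* 0: still alive */
	engine_idle_barrier();
	printf("after barrier: freed=%d\n", c.freed);	/* 1 */
	return 0;
}
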
+
+static int live_active_context(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ struct i915_gem_context *fixme;
+ enum intel_engine_id id;
+ struct drm_file *file;
+ int err = 0;
+
+ file = mock_file(gt->i915);
+ if (IS_ERR(file))
+ return PTR_ERR(file);
+
+ mutex_lock(&gt->i915->drm.struct_mutex);
+
+ fixme = live_context(gt->i915, file);
+ if (IS_ERR(fixme)) {
+ err = PTR_ERR(fixme);
+ goto unlock;
+ }
+
+ for_each_engine(engine, gt->i915, id) {
+ err = __live_active_context(engine, fixme);
+ if (err)
+ break;
+
+ err = igt_flush_test(gt->i915, I915_WAIT_LOCKED);
+ if (err)
+ break;
+ }
+
+unlock:
+ mutex_unlock(&gt->i915->drm.struct_mutex);
+ mock_file_free(gt->i915, file);
+ return err;
+}
+
+static int __remote_sync(struct intel_context *ce, struct intel_context *remote)
+{
+ struct i915_request *rq;
+ int err;
+
+ err = intel_context_pin(remote);
+ if (err)
+ return err;
+
+ rq = intel_context_create_request(ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto unpin;
+ }
+
+ err = intel_context_prepare_remote_request(remote, rq);
+ if (err) {
+ i915_request_add(rq);
+ goto unpin;
+ }
+
+ err = request_sync(rq);
+
+unpin:
+ intel_context_unpin(remote);
+ return err;
+}
+
+static int __live_remote_context(struct intel_engine_cs *engine,
+ struct i915_gem_context *fixme)
+{
+ struct intel_context *local, *remote;
+ int pass;
+ int err;
+
+ /*
+ * Check that our idle barriers do not interfere with normal
+ * activity tracking. In particular, check that operating
+ * on the context image remotely (intel_context_prepare_remote_request),
+ * which inserts foreign fences into intel_context.active, does not
+ * clobber the idle-barrier.
+ */
+
+ remote = intel_context_create(fixme, engine);
+ if (IS_ERR(remote))
+ return PTR_ERR(remote);
+
+ local = intel_context_create(fixme, engine);
+ if (IS_ERR(local)) {
+ err = PTR_ERR(local);
+ goto err_remote;
+ }
+
+ for (pass = 0; pass <= 2; pass++) {
+ err = __remote_sync(local, remote);
+ if (err)
+ break;
+
+ err = __remote_sync(engine->kernel_context, remote);
+ if (err)
+ break;
+
+ if (i915_active_is_idle(&remote->active)) {
+ pr_err("remote context is not active; expected idle-barrier (%s pass %d)\n",
+ engine->name, pass);
+ err = -EINVAL;
+ break;
+ }
+ }
+
+ intel_context_put(local);
+err_remote:
+ intel_context_put(remote);
+ return err;
+}
+
+static int live_remote_context(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ struct i915_gem_context *fixme;
+ enum intel_engine_id id;
+ struct drm_file *file;
+ int err = 0;
+
+ file = mock_file(gt->i915);
+ if (IS_ERR(file))
+ return PTR_ERR(file);
+
+ mutex_lock(&gt->i915->drm.struct_mutex);
+
+ fixme = live_context(gt->i915, file);
+ if (IS_ERR(fixme)) {
+ err = PTR_ERR(fixme);
+ goto unlock;
+ }
+
+ for_each_engine(engine, gt->i915, id) {
+ err = __live_remote_context(engine, fixme);
+ if (err)
+ break;
+
+ err = igt_flush_test(gt->i915, I915_WAIT_LOCKED);
+ if (err)
+ break;
+ }
+
+unlock:
+ mutex_unlock(&gt->i915->drm.struct_mutex);
+ mock_file_free(gt->i915, file);
+ return err;
+}
+
+int intel_context_live_selftests(struct drm_i915_private *i915)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(live_context_size),
+ SUBTEST(live_active_context),
+ SUBTEST(live_remote_context),
+ };
+ struct intel_gt *gt = &i915->gt;
+
+ if (intel_gt_is_wedged(gt))
+ return 0;
+
+ return intel_gt_live_subtests(tests, gt);
+}
diff --git a/drivers/gpu/drm/i915/gt/selftest_engine.c b/drivers/gpu/drm/i915/gt/selftest_engine.c
new file mode 100644
index 000000000000..f65b118e261d
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/selftest_engine.c
@@ -0,0 +1,28 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright © 2018 Intel Corporation
+ */
+
+#include "i915_selftest.h"
+#include "selftest_engine.h"
+
+int intel_engine_live_selftests(struct drm_i915_private *i915)
+{
+ static int (* const tests[])(struct intel_gt *) = {
+ live_engine_pm_selftests,
+ NULL,
+ };
+ struct intel_gt *gt = &i915->gt;
+ typeof(*tests) *fn;
+
+ for (fn = tests; *fn; fn++) {
+ int err;
+
+ err = (*fn)(gt);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/i915/gt/selftest_engine.h b/drivers/gpu/drm/i915/gt/selftest_engine.h
new file mode 100644
index 000000000000..ab32d09ec5a1
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/selftest_engine.h
@@ -0,0 +1,14 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef SELFTEST_ENGINE_H
+#define SELFTEST_ENGINE_H
+
+struct intel_gt;
+
+int live_engine_pm_selftests(struct intel_gt *gt);
+
+#endif
diff --git a/drivers/gpu/drm/i915/gt/selftest_engine_cs.c b/drivers/gpu/drm/i915/gt/selftest_engine_cs.c
index cfaa6b296835..3880f07c29b8 100644
--- a/drivers/gpu/drm/i915/gt/selftest_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/selftest_engine_cs.c
@@ -12,19 +12,18 @@ static int intel_mmio_bases_check(void *arg)
for (i = 0; i < ARRAY_SIZE(intel_engines); i++) {
const struct engine_info *info = &intel_engines[i];
- char name[INTEL_ENGINE_CS_MAX_NAME];
u8 prev = U8_MAX;
- __sprint_engine_name(name, info);
-
for (j = 0; j < MAX_MMIO_BASES; j++) {
u8 gen = info->mmio_bases[j].gen;
u32 base = info->mmio_bases[j].base;
if (gen >= prev) {
- pr_err("%s: %s: mmio base for gen %x "
- "is before the one for gen %x\n",
- __func__, name, prev, gen);
+ pr_err("%s(%s, class:%d, instance:%d): mmio base for gen %x is before the one for gen %x\n",
+ __func__,
+ intel_engine_class_repr(info->class),
+ info->class, info->instance,
+ prev, gen);
return -EINVAL;
}
@@ -32,17 +31,22 @@ static int intel_mmio_bases_check(void *arg)
break;
if (!base) {
- pr_err("%s: %s: invalid mmio base (%x) "
- "for gen %x at entry %u\n",
- __func__, name, base, gen, j);
+ pr_err("%s(%s, class:%d, instance:%d): invalid mmio base (%x) for gen %x at entry %u\n",
+ __func__,
+ intel_engine_class_repr(info->class),
+ info->class, info->instance,
+ base, gen, j);
return -EINVAL;
}
prev = gen;
}
- pr_info("%s: min gen supported for %s = %d\n",
- __func__, name, prev);
+ pr_debug("%s: min gen supported for %s%d is %d\n",
+ __func__,
+ intel_engine_class_repr(info->class),
+ info->instance,
+ prev);
}
return 0;
diff --git a/drivers/gpu/drm/i915/gt/selftest_engine_pm.c b/drivers/gpu/drm/i915/gt/selftest_engine_pm.c
new file mode 100644
index 000000000000..3a1419376912
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/selftest_engine_pm.c
@@ -0,0 +1,83 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright © 2018 Intel Corporation
+ */
+
+#include "i915_selftest.h"
+#include "selftest_engine.h"
+#include "selftests/igt_atomic.h"
+
+static int live_engine_pm(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ /*
+ * Check we can call intel_engine_pm_put from any context. No
+ * failures are reported directly, but if we mess up lockdep should
+ * tell us.
+ */
+ if (intel_gt_pm_wait_for_idle(gt)) {
+ pr_err("Unable to flush GT pm before test\n");
+ return -EBUSY;
+ }
+
+ GEM_BUG_ON(intel_gt_pm_is_awake(gt));
+ for_each_engine(engine, gt->i915, id) {
+ const typeof(*igt_atomic_phases) *p;
+
+ for (p = igt_atomic_phases; p->name; p++) {
+ /*
+ * Acquisition is always synchronous, except if we
+ * know that the engine is already awake, in which
+ * case we should use intel_engine_pm_get_if_awake()
+ * to atomically grab the wakeref.
+ *
+ * In practice,
+ * intel_engine_pm_get();
+ * intel_engine_pm_put();
+ * occurs in one thread, while simultaneously
+ * intel_engine_pm_get_if_awake();
+ * intel_engine_pm_put();
+ * occurs from atomic context in another.
+ */
+ GEM_BUG_ON(intel_engine_pm_is_awake(engine));
+ intel_engine_pm_get(engine);
+
+ p->critical_section_begin();
+ if (!intel_engine_pm_get_if_awake(engine))
+ pr_err("intel_engine_pm_get_if_awake(%s) failed under %s\n",
+ engine->name, p->name);
+ else
+ intel_engine_pm_put(engine);
+ intel_engine_pm_put(engine);
+ p->critical_section_end();
+
+ /* engine wakeref is sync (instant) */
+ if (intel_engine_pm_is_awake(engine)) {
+ pr_err("%s is still awake after flushing pm\n",
+ engine->name);
+ return -EINVAL;
+ }
+
+ /* gt wakeref is async (deferred to workqueue) */
+ if (intel_gt_pm_wait_for_idle(gt)) {
+ pr_err("GT failed to idle\n");
+ return -EINVAL;
+ }
+ }
+ }
+
+ return 0;
+}
+
+int live_engine_pm_selftests(struct intel_gt *gt)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(live_engine_pm),
+ };
+
+ return intel_gt_live_subtests(tests, gt);
+}
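
As the comment in live_engine_pm() spells out, the full intel_engine_pm_get() performs synchronous acquisition, whereas intel_engine_pm_get_if_awake() may be called from atomic context because it only ever bumps a reference that is already held — it never wakes the hardware. A standalone model of that conditional grab (a bare counter standing in for the real wakeref machinery):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static bool get_if_awake(atomic_int *wakeref)
{
	int old = atomic_load(wakeref);

	do {
		if (old == 0)
			return false;	/* asleep: caller must use the sync path */
	} while (!atomic_compare_exchange_weak(wakeref, &old, old + 1));

	return true;
}

static void put(atomic_int *wakeref)
{
	atomic_fetch_sub(wakeref, 1);
}

int main(void)
{
	atomic_int wakeref = 0;

	printf("%d\n", get_if_awake(&wakeref));	/* 0: engine asleep */
	atomic_fetch_add(&wakeref, 1);		/* a synchronous get() elsewhere */
	printf("%d\n", get_if_awake(&wakeref));	/* 1: reference grabbed atomically */
	put(&wakeref);
	put(&wakeref);
	return 0;
}
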
diff --git a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
index 1ee4c923044f..a0098fc35921 100644
--- a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
+++ b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
@@ -25,13 +25,13 @@
#include <linux/kthread.h>
#include "gem/i915_gem_context.h"
+#include "gt/intel_gt.h"
#include "intel_engine_pm.h"
#include "i915_selftest.h"
#include "selftests/i915_random.h"
#include "selftests/igt_flush_test.h"
#include "selftests/igt_reset.h"
-#include "selftests/igt_wedge_me.h"
#include "selftests/igt_atomic.h"
#include "selftests/mock_drm.h"
@@ -42,7 +42,7 @@
#define IGT_IDLE_TIMEOUT 50 /* ms; time to wait after flushing between tests */
struct hang {
- struct drm_i915_private *i915;
+ struct intel_gt *gt;
struct drm_i915_gem_object *hws;
struct drm_i915_gem_object *obj;
struct i915_gem_context *ctx;
@@ -50,27 +50,27 @@ struct hang {
u32 *batch;
};
-static int hang_init(struct hang *h, struct drm_i915_private *i915)
+static int hang_init(struct hang *h, struct intel_gt *gt)
{
void *vaddr;
int err;
memset(h, 0, sizeof(*h));
- h->i915 = i915;
+ h->gt = gt;
- h->ctx = kernel_context(i915);
+ h->ctx = kernel_context(gt->i915);
if (IS_ERR(h->ctx))
return PTR_ERR(h->ctx);
GEM_BUG_ON(i915_gem_context_is_bannable(h->ctx));
- h->hws = i915_gem_object_create_internal(i915, PAGE_SIZE);
+ h->hws = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
if (IS_ERR(h->hws)) {
err = PTR_ERR(h->hws);
goto err_ctx;
}
- h->obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
+ h->obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
if (IS_ERR(h->obj)) {
err = PTR_ERR(h->obj);
goto err_hws;
@@ -85,7 +85,7 @@ static int hang_init(struct hang *h, struct drm_i915_private *i915)
h->seqno = memset(vaddr, 0xff, PAGE_SIZE);
vaddr = i915_gem_object_pin_map(h->obj,
- i915_coherent_map_type(i915));
+ i915_coherent_map_type(gt->i915));
if (IS_ERR(vaddr)) {
err = PTR_ERR(vaddr);
goto err_unpin_hws;
@@ -118,7 +118,10 @@ static int move_to_active(struct i915_vma *vma,
int err;
i915_vma_lock(vma);
- err = i915_vma_move_to_active(vma, rq, flags);
+ err = i915_request_await_object(rq, vma->obj,
+ flags & EXEC_OBJECT_WRITE);
+ if (err == 0)
+ err = i915_vma_move_to_active(vma, rq, flags);
i915_vma_unlock(vma);
return err;
@@ -127,35 +130,31 @@ static int move_to_active(struct i915_vma *vma,
static struct i915_request *
hang_create_request(struct hang *h, struct intel_engine_cs *engine)
{
- struct drm_i915_private *i915 = h->i915;
- struct i915_address_space *vm = h->ctx->vm ?: &i915->ggtt.vm;
+ struct intel_gt *gt = h->gt;
+ struct i915_address_space *vm = h->ctx->vm ?: &engine->gt->ggtt->vm;
+ struct drm_i915_gem_object *obj;
struct i915_request *rq = NULL;
struct i915_vma *hws, *vma;
unsigned int flags;
+ void *vaddr;
u32 *batch;
int err;
- if (i915_gem_object_is_active(h->obj)) {
- struct drm_i915_gem_object *obj;
- void *vaddr;
+ obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
+ if (IS_ERR(obj))
+ return ERR_CAST(obj);
- obj = i915_gem_object_create_internal(h->i915, PAGE_SIZE);
- if (IS_ERR(obj))
- return ERR_CAST(obj);
-
- vaddr = i915_gem_object_pin_map(obj,
- i915_coherent_map_type(h->i915));
- if (IS_ERR(vaddr)) {
- i915_gem_object_put(obj);
- return ERR_CAST(vaddr);
- }
+ vaddr = i915_gem_object_pin_map(obj, i915_coherent_map_type(gt->i915));
+ if (IS_ERR(vaddr)) {
+ i915_gem_object_put(obj);
+ return ERR_CAST(vaddr);
+ }
- i915_gem_object_unpin_map(h->obj);
- i915_gem_object_put(h->obj);
+ i915_gem_object_unpin_map(h->obj);
+ i915_gem_object_put(h->obj);
- h->obj = obj;
- h->batch = vaddr;
- }
+ h->obj = obj;
+ h->batch = vaddr;
vma = i915_vma_instance(h->obj, vm, NULL);
if (IS_ERR(vma))
@@ -188,7 +187,7 @@ hang_create_request(struct hang *h, struct intel_engine_cs *engine)
goto cancel_rq;
batch = h->batch;
- if (INTEL_GEN(i915) >= 8) {
+ if (INTEL_GEN(gt->i915) >= 8) {
*batch++ = MI_STORE_DWORD_IMM_GEN4;
*batch++ = lower_32_bits(hws_address(hws, rq));
*batch++ = upper_32_bits(hws_address(hws, rq));
@@ -202,7 +201,7 @@ hang_create_request(struct hang *h, struct intel_engine_cs *engine)
*batch++ = MI_BATCH_BUFFER_START | 1 << 8 | 1;
*batch++ = lower_32_bits(vma->node.start);
*batch++ = upper_32_bits(vma->node.start);
- } else if (INTEL_GEN(i915) >= 6) {
+ } else if (INTEL_GEN(gt->i915) >= 6) {
*batch++ = MI_STORE_DWORD_IMM_GEN4;
*batch++ = 0;
*batch++ = lower_32_bits(hws_address(hws, rq));
@@ -215,7 +214,7 @@ hang_create_request(struct hang *h, struct intel_engine_cs *engine)
*batch++ = MI_ARB_CHECK;
*batch++ = MI_BATCH_BUFFER_START | 1 << 8;
*batch++ = lower_32_bits(vma->node.start);
- } else if (INTEL_GEN(i915) >= 4) {
+ } else if (INTEL_GEN(gt->i915) >= 4) {
*batch++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
*batch++ = 0;
*batch++ = lower_32_bits(hws_address(hws, rq));
@@ -242,7 +241,7 @@ hang_create_request(struct hang *h, struct intel_engine_cs *engine)
*batch++ = lower_32_bits(vma->node.start);
}
*batch++ = MI_BATCH_BUFFER_END; /* not reached */
- i915_gem_chipset_flush(h->i915);
+ intel_gt_chipset_flush(engine->gt);
if (rq->engine->emit_init_breadcrumb) {
err = rq->engine->emit_init_breadcrumb(rq);
@@ -251,7 +250,7 @@ hang_create_request(struct hang *h, struct intel_engine_cs *engine)
}
flags = 0;
- if (INTEL_GEN(vm->i915) <= 5)
+ if (INTEL_GEN(gt->i915) <= 5)
flags |= I915_DISPATCH_SECURE;
err = rq->engine->emit_bb_start(rq, vma->node.start, PAGE_SIZE, flags);
@@ -276,7 +275,7 @@ static u32 hws_seqno(const struct hang *h, const struct i915_request *rq)
static void hang_fini(struct hang *h)
{
*h->batch = MI_BATCH_BUFFER_END;
- i915_gem_chipset_flush(h->i915);
+ intel_gt_chipset_flush(h->gt);
i915_gem_object_unpin_map(h->obj);
i915_gem_object_put(h->obj);
@@ -286,7 +285,7 @@ static void hang_fini(struct hang *h)
kernel_context_close(h->ctx);
- igt_flush_test(h->i915, I915_WAIT_LOCKED);
+ igt_flush_test(h->gt->i915, I915_WAIT_LOCKED);
}
static bool wait_until_running(struct hang *h, struct i915_request *rq)
@@ -301,7 +300,7 @@ static bool wait_until_running(struct hang *h, struct i915_request *rq)
static int igt_hang_sanitycheck(void *arg)
{
- struct drm_i915_private *i915 = arg;
+ struct intel_gt *gt = arg;
struct i915_request *rq;
struct intel_engine_cs *engine;
enum intel_engine_id id;
@@ -310,13 +309,13 @@ static int igt_hang_sanitycheck(void *arg)
/* Basic check that we can execute our hanging batch */
- mutex_lock(&i915->drm.struct_mutex);
- err = hang_init(&h, i915);
+ mutex_lock(&gt->i915->drm.struct_mutex);
+ err = hang_init(&h, gt);
if (err)
goto unlock;
- for_each_engine(engine, i915, id) {
- struct igt_wedge_me w;
+ for_each_engine(engine, gt->i915, id) {
+ struct intel_wedge_me w;
long timeout;
if (!intel_engine_can_store_dword(engine))
@@ -333,15 +332,15 @@ static int igt_hang_sanitycheck(void *arg)
i915_request_get(rq);
*h.batch = MI_BATCH_BUFFER_END;
- i915_gem_chipset_flush(i915);
+ intel_gt_chipset_flush(engine->gt);
i915_request_add(rq);
timeout = 0;
- igt_wedge_on_timeout(&w, i915, HZ / 10 /* 100ms timeout*/)
+ intel_wedge_on_timeout(&w, gt, HZ / 10 /* 100ms */)
timeout = i915_request_wait(rq, 0,
MAX_SCHEDULE_TIMEOUT);
- if (i915_reset_failed(i915))
+ if (intel_gt_is_wedged(gt))
timeout = -EIO;
i915_request_put(rq);
@@ -357,7 +356,7 @@ static int igt_hang_sanitycheck(void *arg)
fini:
hang_fini(&h);
unlock:
- mutex_unlock(&i915->drm.struct_mutex);
+ mutex_unlock(&gt->i915->drm.struct_mutex);
return err;
}
@@ -368,37 +367,37 @@ static bool wait_for_idle(struct intel_engine_cs *engine)
static int igt_reset_nop(void *arg)
{
- struct drm_i915_private *i915 = arg;
+ struct intel_gt *gt = arg;
+ struct i915_gpu_error *global = &gt->i915->gpu_error;
struct intel_engine_cs *engine;
struct i915_gem_context *ctx;
unsigned int reset_count, count;
enum intel_engine_id id;
- intel_wakeref_t wakeref;
struct drm_file *file;
IGT_TIMEOUT(end_time);
int err = 0;
/* Check that we can reset during non-user portions of requests */
- file = mock_file(i915);
+ file = mock_file(gt->i915);
if (IS_ERR(file))
return PTR_ERR(file);
- mutex_lock(&i915->drm.struct_mutex);
- ctx = live_context(i915, file);
- mutex_unlock(&i915->drm.struct_mutex);
+ mutex_lock(&gt->i915->drm.struct_mutex);
+ ctx = live_context(gt->i915, file);
+ mutex_unlock(&gt->i915->drm.struct_mutex);
if (IS_ERR(ctx)) {
err = PTR_ERR(ctx);
goto out;
}
i915_gem_context_clear_bannable(ctx);
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
- reset_count = i915_reset_count(&i915->gpu_error);
+ reset_count = i915_reset_count(global);
count = 0;
do {
- mutex_lock(&i915->drm.struct_mutex);
- for_each_engine(engine, i915, id) {
+ mutex_lock(&gt->i915->drm.struct_mutex);
+
+ for_each_engine(engine, gt->i915, id) {
int i;
for (i = 0; i < 16; i++) {
@@ -413,82 +412,78 @@ static int igt_reset_nop(void *arg)
i915_request_add(rq);
}
}
- mutex_unlock(&i915->drm.struct_mutex);
- igt_global_reset_lock(i915);
- i915_reset(i915, ALL_ENGINES, NULL);
- igt_global_reset_unlock(i915);
- if (i915_reset_failed(i915)) {
+ igt_global_reset_lock(gt);
+ intel_gt_reset(gt, ALL_ENGINES, NULL);
+ igt_global_reset_unlock(gt);
+
+ mutex_unlock(&gt->i915->drm.struct_mutex);
+ if (intel_gt_is_wedged(gt)) {
err = -EIO;
break;
}
- if (i915_reset_count(&i915->gpu_error) !=
- reset_count + ++count) {
+ if (i915_reset_count(global) != reset_count + ++count) {
pr_err("Full GPU reset not recorded!\n");
err = -EINVAL;
break;
}
- err = igt_flush_test(i915, 0);
+ err = igt_flush_test(gt->i915, 0);
if (err)
break;
} while (time_before(jiffies, end_time));
pr_info("%s: %d resets\n", __func__, count);
- mutex_lock(&i915->drm.struct_mutex);
- err = igt_flush_test(i915, I915_WAIT_LOCKED);
- mutex_unlock(&i915->drm.struct_mutex);
-
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
+ mutex_lock(&gt->i915->drm.struct_mutex);
+ err = igt_flush_test(gt->i915, I915_WAIT_LOCKED);
+ mutex_unlock(&gt->i915->drm.struct_mutex);
out:
- mock_file_free(i915, file);
- if (i915_reset_failed(i915))
+ mock_file_free(gt->i915, file);
+ if (intel_gt_is_wedged(gt))
err = -EIO;
return err;
}
static int igt_reset_nop_engine(void *arg)
{
- struct drm_i915_private *i915 = arg;
+ struct intel_gt *gt = arg;
+ struct i915_gpu_error *global = &gt->i915->gpu_error;
struct intel_engine_cs *engine;
struct i915_gem_context *ctx;
enum intel_engine_id id;
- intel_wakeref_t wakeref;
struct drm_file *file;
int err = 0;
/* Check that we can engine-reset during non-user portions */
- if (!intel_has_reset_engine(i915))
+ if (!intel_has_reset_engine(gt->i915))
return 0;
- file = mock_file(i915);
+ file = mock_file(gt->i915);
if (IS_ERR(file))
return PTR_ERR(file);
- mutex_lock(&i915->drm.struct_mutex);
- ctx = live_context(i915, file);
- mutex_unlock(&i915->drm.struct_mutex);
+ mutex_lock(&gt->i915->drm.struct_mutex);
+ ctx = live_context(gt->i915, file);
+ mutex_unlock(&gt->i915->drm.struct_mutex);
if (IS_ERR(ctx)) {
err = PTR_ERR(ctx);
goto out;
}
i915_gem_context_clear_bannable(ctx);
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
- for_each_engine(engine, i915, id) {
+ for_each_engine(engine, gt->i915, id) {
unsigned int reset_count, reset_engine_count;
unsigned int count;
IGT_TIMEOUT(end_time);
- reset_count = i915_reset_count(&i915->gpu_error);
- reset_engine_count = i915_reset_engine_count(&i915->gpu_error,
- engine);
+ reset_count = i915_reset_count(global);
+ reset_engine_count = i915_reset_engine_count(global, engine);
count = 0;
- set_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags);
+ set_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
do {
int i;
@@ -499,7 +494,7 @@ static int igt_reset_nop_engine(void *arg)
break;
}
- mutex_lock(&i915->drm.struct_mutex);
+ mutex_lock(&gt->i915->drm.struct_mutex);
for (i = 0; i < 16; i++) {
struct i915_request *rq;
@@ -511,21 +506,20 @@ static int igt_reset_nop_engine(void *arg)
i915_request_add(rq);
}
- mutex_unlock(&i915->drm.struct_mutex);
-
- err = i915_reset_engine(engine, NULL);
+ err = intel_engine_reset(engine, NULL);
+ mutex_unlock(&gt->i915->drm.struct_mutex);
if (err) {
pr_err("i915_reset_engine failed\n");
break;
}
- if (i915_reset_count(&i915->gpu_error) != reset_count) {
+ if (i915_reset_count(global) != reset_count) {
pr_err("Full GPU reset recorded! (engine reset expected)\n");
err = -EINVAL;
break;
}
- if (i915_reset_engine_count(&i915->gpu_error, engine) !=
+ if (i915_reset_engine_count(global, engine) !=
reset_engine_count + ++count) {
pr_err("%s engine reset not recorded!\n",
engine->name);
@@ -533,31 +527,31 @@ static int igt_reset_nop_engine(void *arg)
break;
}
} while (time_before(jiffies, end_time));
- clear_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags);
+ clear_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
pr_info("%s(%s): %d resets\n", __func__, engine->name, count);
if (err)
break;
- err = igt_flush_test(i915, 0);
+ err = igt_flush_test(gt->i915, 0);
if (err)
break;
}
- mutex_lock(&i915->drm.struct_mutex);
- err = igt_flush_test(i915, I915_WAIT_LOCKED);
- mutex_unlock(&i915->drm.struct_mutex);
+ mutex_lock(&gt->i915->drm.struct_mutex);
+ err = igt_flush_test(gt->i915, I915_WAIT_LOCKED);
+ mutex_unlock(&gt->i915->drm.struct_mutex);
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
out:
- mock_file_free(i915, file);
- if (i915_reset_failed(i915))
+ mock_file_free(gt->i915, file);
+ if (intel_gt_is_wedged(gt))
err = -EIO;
return err;
}
-static int __igt_reset_engine(struct drm_i915_private *i915, bool active)
+static int __igt_reset_engine(struct intel_gt *gt, bool active)
{
+ struct i915_gpu_error *global = &gt->i915->gpu_error;
struct intel_engine_cs *engine;
enum intel_engine_id id;
struct hang h;
@@ -565,18 +559,18 @@ static int __igt_reset_engine(struct drm_i915_private *i915, bool active)
/* Check that we can issue an engine reset on an idle engine (no-op) */
- if (!intel_has_reset_engine(i915))
+ if (!intel_has_reset_engine(gt->i915))
return 0;
if (active) {
- mutex_lock(&i915->drm.struct_mutex);
- err = hang_init(&h, i915);
- mutex_unlock(&i915->drm.struct_mutex);
+ mutex_lock(&gt->i915->drm.struct_mutex);
+ err = hang_init(&h, gt);
+ mutex_unlock(&gt->i915->drm.struct_mutex);
if (err)
return err;
}
- for_each_engine(engine, i915, id) {
+ for_each_engine(engine, gt->i915, id) {
unsigned int reset_count, reset_engine_count;
IGT_TIMEOUT(end_time);
@@ -590,30 +584,29 @@ static int __igt_reset_engine(struct drm_i915_private *i915, bool active)
break;
}
- reset_count = i915_reset_count(&i915->gpu_error);
- reset_engine_count = i915_reset_engine_count(&i915->gpu_error,
- engine);
+ reset_count = i915_reset_count(global);
+ reset_engine_count = i915_reset_engine_count(global, engine);
intel_engine_pm_get(engine);
- set_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags);
+ set_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
do {
if (active) {
struct i915_request *rq;
- mutex_lock(&i915->drm.struct_mutex);
+ mutex_lock(&gt->i915->drm.struct_mutex);
rq = hang_create_request(&h, engine);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
- mutex_unlock(&i915->drm.struct_mutex);
+ mutex_unlock(&gt->i915->drm.struct_mutex);
break;
}
i915_request_get(rq);
i915_request_add(rq);
- mutex_unlock(&i915->drm.struct_mutex);
+ mutex_unlock(&gt->i915->drm.struct_mutex);
if (!wait_until_running(&h, rq)) {
- struct drm_printer p = drm_info_printer(i915->drm.dev);
+ struct drm_printer p = drm_info_printer(gt->i915->drm.dev);
pr_err("%s: Failed to start request %llx, at %x\n",
__func__, rq->fence.seqno, hws_seqno(&h, rq));
@@ -628,19 +621,19 @@ static int __igt_reset_engine(struct drm_i915_private *i915, bool active)
i915_request_put(rq);
}
- err = i915_reset_engine(engine, NULL);
+ err = intel_engine_reset(engine, NULL);
if (err) {
pr_err("i915_reset_engine failed\n");
break;
}
- if (i915_reset_count(&i915->gpu_error) != reset_count) {
+ if (i915_reset_count(global) != reset_count) {
pr_err("Full GPU reset recorded! (engine reset expected)\n");
err = -EINVAL;
break;
}
- if (i915_reset_engine_count(&i915->gpu_error, engine) !=
+ if (i915_reset_engine_count(global, engine) !=
++reset_engine_count) {
pr_err("%s engine reset not recorded!\n",
engine->name);
@@ -648,24 +641,24 @@ static int __igt_reset_engine(struct drm_i915_private *i915, bool active)
break;
}
} while (time_before(jiffies, end_time));
- clear_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags);
+ clear_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
intel_engine_pm_put(engine);
if (err)
break;
- err = igt_flush_test(i915, 0);
+ err = igt_flush_test(gt->i915, 0);
if (err)
break;
}
- if (i915_reset_failed(i915))
+ if (intel_gt_is_wedged(gt))
err = -EIO;
if (active) {
- mutex_lock(&i915->drm.struct_mutex);
+ mutex_lock(&gt->i915->drm.struct_mutex);
hang_fini(&h);
- mutex_unlock(&i915->drm.struct_mutex);
+ mutex_unlock(&gt->i915->drm.struct_mutex);
}
return err;
@@ -707,7 +700,7 @@ static int active_request_put(struct i915_request *rq)
rq->fence.seqno);
GEM_TRACE_DUMP();
- i915_gem_set_wedged(rq->i915);
+ intel_gt_set_wedged(rq->engine->gt);
err = -EIO;
}
@@ -784,10 +777,11 @@ err_file:
return err;
}
-static int __igt_reset_engines(struct drm_i915_private *i915,
+static int __igt_reset_engines(struct intel_gt *gt,
const char *test_name,
unsigned int flags)
{
+ struct i915_gpu_error *global = &gt->i915->gpu_error;
struct intel_engine_cs *engine, *other;
enum intel_engine_id id, tmp;
struct hang h;
@@ -797,13 +791,13 @@ static int __igt_reset_engines(struct drm_i915_private *i915,
* with any other engine.
*/
- if (!intel_has_reset_engine(i915))
+ if (!intel_has_reset_engine(gt->i915))
return 0;
if (flags & TEST_ACTIVE) {
- mutex_lock(&i915->drm.struct_mutex);
- err = hang_init(&h, i915);
- mutex_unlock(&i915->drm.struct_mutex);
+ mutex_lock(&gt->i915->drm.struct_mutex);
+ err = hang_init(&h, gt);
+ mutex_unlock(&gt->i915->drm.struct_mutex);
if (err)
return err;
@@ -811,9 +805,9 @@ static int __igt_reset_engines(struct drm_i915_private *i915,
h.ctx->sched.priority = 1024;
}
- for_each_engine(engine, i915, id) {
+ for_each_engine(engine, gt->i915, id) {
struct active_engine threads[I915_NUM_ENGINES] = {};
- unsigned long global = i915_reset_count(&i915->gpu_error);
+ unsigned long device = i915_reset_count(global);
unsigned long count = 0, reported;
IGT_TIMEOUT(end_time);
@@ -829,12 +823,11 @@ static int __igt_reset_engines(struct drm_i915_private *i915,
}
memset(threads, 0, sizeof(threads));
- for_each_engine(other, i915, tmp) {
+ for_each_engine(other, gt->i915, tmp) {
struct task_struct *tsk;
threads[tmp].resets =
- i915_reset_engine_count(&i915->gpu_error,
- other);
+ i915_reset_engine_count(global, other);
if (!(flags & TEST_OTHERS))
continue;
@@ -857,25 +850,25 @@ static int __igt_reset_engines(struct drm_i915_private *i915,
}
intel_engine_pm_get(engine);
- set_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags);
+ set_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
do {
struct i915_request *rq = NULL;
if (flags & TEST_ACTIVE) {
- mutex_lock(&i915->drm.struct_mutex);
+ mutex_lock(&gt->i915->drm.struct_mutex);
rq = hang_create_request(&h, engine);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
- mutex_unlock(&i915->drm.struct_mutex);
+ mutex_unlock(&gt->i915->drm.struct_mutex);
break;
}
i915_request_get(rq);
i915_request_add(rq);
- mutex_unlock(&i915->drm.struct_mutex);
+ mutex_unlock(&gt->i915->drm.struct_mutex);
if (!wait_until_running(&h, rq)) {
- struct drm_printer p = drm_info_printer(i915->drm.dev);
+ struct drm_printer p = drm_info_printer(gt->i915->drm.dev);
pr_err("%s: Failed to start request %llx, at %x\n",
__func__, rq->fence.seqno, hws_seqno(&h, rq));
@@ -888,7 +881,7 @@ static int __igt_reset_engines(struct drm_i915_private *i915,
}
}
- err = i915_reset_engine(engine, NULL);
+ err = intel_engine_reset(engine, NULL);
if (err) {
pr_err("i915_reset_engine(%s:%s): failed, err=%d\n",
engine->name, test_name, err);
@@ -900,7 +893,7 @@ static int __igt_reset_engines(struct drm_i915_private *i915,
if (rq) {
if (i915_request_wait(rq, 0, HZ / 5) < 0) {
struct drm_printer p =
- drm_info_printer(i915->drm.dev);
+ drm_info_printer(gt->i915->drm.dev);
pr_err("i915_reset_engine(%s:%s):"
" failed to complete request after reset\n",
@@ -910,7 +903,7 @@ static int __igt_reset_engines(struct drm_i915_private *i915,
i915_request_put(rq);
GEM_TRACE_DUMP();
- i915_gem_set_wedged(i915);
+ intel_gt_set_wedged(gt);
err = -EIO;
break;
}
@@ -920,7 +913,7 @@ static int __igt_reset_engines(struct drm_i915_private *i915,
if (!(flags & TEST_SELF) && !wait_for_idle(engine)) {
struct drm_printer p =
- drm_info_printer(i915->drm.dev);
+ drm_info_printer(gt->i915->drm.dev);
pr_err("i915_reset_engine(%s:%s):"
" failed to idle after reset\n",
@@ -932,12 +925,12 @@ static int __igt_reset_engines(struct drm_i915_private *i915,
break;
}
} while (time_before(jiffies, end_time));
- clear_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags);
+ clear_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
intel_engine_pm_put(engine);
pr_info("i915_reset_engine(%s:%s): %lu resets\n",
engine->name, test_name, count);
- reported = i915_reset_engine_count(&i915->gpu_error, engine);
+ reported = i915_reset_engine_count(global, engine);
reported -= threads[engine->id].resets;
if (reported != count) {
pr_err("i915_reset_engine(%s:%s): reset %lu times, but reported %lu\n",
@@ -947,7 +940,7 @@ static int __igt_reset_engines(struct drm_i915_private *i915,
}
unwind:
- for_each_engine(other, i915, tmp) {
+ for_each_engine(other, gt->i915, tmp) {
int ret;
if (!threads[tmp].task)
@@ -962,22 +955,21 @@ unwind:
}
put_task_struct(threads[tmp].task);
- if (other != engine &&
+ if (other->uabi_class != engine->uabi_class &&
threads[tmp].resets !=
- i915_reset_engine_count(&i915->gpu_error, other)) {
+ i915_reset_engine_count(global, other)) {
pr_err("Innocent engine %s was reset (count=%ld)\n",
other->name,
- i915_reset_engine_count(&i915->gpu_error,
- other) -
+ i915_reset_engine_count(global, other) -
threads[tmp].resets);
if (!err)
err = -EINVAL;
}
}
- if (global != i915_reset_count(&i915->gpu_error)) {
+ if (device != i915_reset_count(global)) {
pr_err("Global reset (count=%ld)!\n",
- i915_reset_count(&i915->gpu_error) - global);
+ i915_reset_count(global) - device);
if (!err)
err = -EINVAL;
}
@@ -985,20 +977,20 @@ unwind:
if (err)
break;
- mutex_lock(&i915->drm.struct_mutex);
- err = igt_flush_test(i915, I915_WAIT_LOCKED);
- mutex_unlock(&i915->drm.struct_mutex);
+ mutex_lock(&gt->i915->drm.struct_mutex);
+ err = igt_flush_test(gt->i915, I915_WAIT_LOCKED);
+ mutex_unlock(&gt->i915->drm.struct_mutex);
if (err)
break;
}
- if (i915_reset_failed(i915))
+ if (intel_gt_is_wedged(gt))
err = -EIO;
if (flags & TEST_ACTIVE) {
- mutex_lock(&i915->drm.struct_mutex);
+ mutex_lock(&gt->i915->drm.struct_mutex);
hang_fini(&h);
- mutex_unlock(&i915->drm.struct_mutex);
+ mutex_unlock(&gt->i915->drm.struct_mutex);
}
return err;
@@ -1024,13 +1016,13 @@ static int igt_reset_engines(void *arg)
},
{ }
};
- struct drm_i915_private *i915 = arg;
+ struct intel_gt *gt = arg;
typeof(*phases) *p;
int err;
for (p = phases; p->name; p++) {
if (p->flags & TEST_PRIORITY) {
- if (!(i915->caps.scheduler & I915_SCHEDULER_CAP_PRIORITY))
+ if (!(gt->i915->caps.scheduler & I915_SCHEDULER_CAP_PRIORITY))
continue;
}
@@ -1042,38 +1034,39 @@ static int igt_reset_engines(void *arg)
return 0;
}
-static u32 fake_hangcheck(struct drm_i915_private *i915,
- intel_engine_mask_t mask)
+static u32 fake_hangcheck(struct intel_gt *gt, intel_engine_mask_t mask)
{
- u32 count = i915_reset_count(&i915->gpu_error);
+ u32 count = i915_reset_count(&gt->i915->gpu_error);
- i915_reset(i915, mask, NULL);
+ intel_gt_reset(gt, mask, NULL);
return count;
}
static int igt_reset_wait(void *arg)
{
- struct drm_i915_private *i915 = arg;
+ struct intel_gt *gt = arg;
+ struct i915_gpu_error *global = &gt->i915->gpu_error;
+ struct intel_engine_cs *engine = gt->i915->engine[RCS0];
struct i915_request *rq;
unsigned int reset_count;
struct hang h;
long timeout;
int err;
- if (!intel_engine_can_store_dword(i915->engine[RCS0]))
+ if (!engine || !intel_engine_can_store_dword(engine))
return 0;
/* Check that we detect a stuck waiter and issue a reset */
- igt_global_reset_lock(i915);
+ igt_global_reset_lock(gt);
- mutex_lock(&i915->drm.struct_mutex);
- err = hang_init(&h, i915);
+ mutex_lock(&gt->i915->drm.struct_mutex);
+ err = hang_init(&h, gt);
if (err)
goto unlock;
- rq = hang_create_request(&h, i915->engine[RCS0]);
+ rq = hang_create_request(&h, engine);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto fini;
@@ -1083,19 +1076,19 @@ static int igt_reset_wait(void *arg)
i915_request_add(rq);
if (!wait_until_running(&h, rq)) {
- struct drm_printer p = drm_info_printer(i915->drm.dev);
+ struct drm_printer p = drm_info_printer(gt->i915->drm.dev);
pr_err("%s: Failed to start request %llx, at %x\n",
__func__, rq->fence.seqno, hws_seqno(&h, rq));
intel_engine_dump(rq->engine, &p, "%s\n", rq->engine->name);
- i915_gem_set_wedged(i915);
+ intel_gt_set_wedged(gt);
err = -EIO;
goto out_rq;
}
- reset_count = fake_hangcheck(i915, ALL_ENGINES);
+ reset_count = fake_hangcheck(gt, ALL_ENGINES);
timeout = i915_request_wait(rq, 0, 10);
if (timeout < 0) {
@@ -1105,7 +1098,7 @@ static int igt_reset_wait(void *arg)
goto out_rq;
}
- if (i915_reset_count(&i915->gpu_error) == reset_count) {
+ if (i915_reset_count(global) == reset_count) {
pr_err("No GPU reset recorded!\n");
err = -EINVAL;
goto out_rq;
@@ -1116,10 +1109,10 @@ out_rq:
fini:
hang_fini(&h);
unlock:
- mutex_unlock(&i915->drm.struct_mutex);
- igt_global_reset_unlock(i915);
+ mutex_unlock(&gt->i915->drm.struct_mutex);
+ igt_global_reset_unlock(gt);
- if (i915_reset_failed(i915))
+ if (intel_gt_is_wedged(gt))
return -EIO;
return err;
@@ -1164,7 +1157,14 @@ static int evict_fence(void *data)
goto out_unlock;
}
+ err = i915_vma_pin(arg->vma, 0, 0, PIN_GLOBAL | PIN_MAPPABLE);
+ if (err) {
+ pr_err("Unable to pin vma for Y-tiled fence; err:%d\n", err);
+ goto out_unlock;
+ }
+
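+ /*
+ * Note: a fence register only operates on a vma bound into the
+ * mappable aperture, hence the temporary GGTT pin for the fence;
+ * the pin is dropped again immediately afterwards, as only the
+ * fence allocation itself needs to stick around.
+ */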
err = i915_vma_pin_fence(arg->vma);
+ i915_vma_unpin(arg->vma);
if (err) {
pr_err("Unable to pin Y-tiled fence; err:%d\n", err);
goto out_unlock;
@@ -1178,11 +1178,12 @@ out_unlock:
return err;
}
-static int __igt_reset_evict_vma(struct drm_i915_private *i915,
+static int __igt_reset_evict_vma(struct intel_gt *gt,
struct i915_address_space *vm,
int (*fn)(void *),
unsigned int flags)
{
+ struct intel_engine_cs *engine = gt->i915->engine[RCS0];
struct drm_i915_gem_object *obj;
struct task_struct *tsk = NULL;
struct i915_request *rq;
@@ -1190,17 +1191,17 @@ static int __igt_reset_evict_vma(struct drm_i915_private *i915,
struct hang h;
int err;
- if (!intel_engine_can_store_dword(i915->engine[RCS0]))
+ if (!engine || !intel_engine_can_store_dword(engine))
return 0;
/* Check that we can recover an unbind stuck on a hanging request */
- mutex_lock(&i915->drm.struct_mutex);
- err = hang_init(&h, i915);
+ mutex_lock(&gt->i915->drm.struct_mutex);
+ err = hang_init(&h, gt);
if (err)
goto unlock;
- obj = i915_gem_object_create_internal(i915, SZ_1M);
+ obj = i915_gem_object_create_internal(gt->i915, SZ_1M);
if (IS_ERR(obj)) {
err = PTR_ERR(obj);
goto fini;
@@ -1220,7 +1221,7 @@ static int __igt_reset_evict_vma(struct drm_i915_private *i915,
goto out_obj;
}
- rq = hang_create_request(&h, i915->engine[RCS0]);
+ rq = hang_create_request(&h, engine);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto out_obj;
@@ -1246,7 +1247,10 @@ static int __igt_reset_evict_vma(struct drm_i915_private *i915,
}
i915_vma_lock(arg.vma);
- err = i915_vma_move_to_active(arg.vma, rq, flags);
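+ /*
+ * Declare the request's use of the object before tracking the vma:
+ * first add the implicit dma-fence dependency (honouring the write
+ * flag), then mark the vma active, matching the execbuf ordering.
+ */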
+ err = i915_request_await_object(rq, arg.vma->obj,
+ flags & EXEC_OBJECT_WRITE);
+ if (err == 0)
+ err = i915_vma_move_to_active(arg.vma, rq, flags);
i915_vma_unlock(arg.vma);
if (flags & EXEC_OBJECT_NEEDS_FENCE)
@@ -1258,16 +1262,16 @@ static int __igt_reset_evict_vma(struct drm_i915_private *i915,
if (err)
goto out_rq;
- mutex_unlock(&i915->drm.struct_mutex);
+ mutex_unlock(&gt->i915->drm.struct_mutex);
if (!wait_until_running(&h, rq)) {
- struct drm_printer p = drm_info_printer(i915->drm.dev);
+ struct drm_printer p = drm_info_printer(gt->i915->drm.dev);
pr_err("%s: Failed to start request %llx, at %x\n",
__func__, rq->fence.seqno, hws_seqno(&h, rq));
intel_engine_dump(rq->engine, &p, "%s\n", rq->engine->name);
- i915_gem_set_wedged(i915);
+ intel_gt_set_wedged(gt);
goto out_reset;
}
@@ -1284,31 +1288,31 @@ static int __igt_reset_evict_vma(struct drm_i915_private *i915,
wait_for_completion(&arg.completion);
if (wait_for(!list_empty(&rq->fence.cb_list), 10)) {
- struct drm_printer p = drm_info_printer(i915->drm.dev);
+ struct drm_printer p = drm_info_printer(gt->i915->drm.dev);
pr_err("igt/evict_vma kthread did not wait\n");
intel_engine_dump(rq->engine, &p, "%s\n", rq->engine->name);
- i915_gem_set_wedged(i915);
+ intel_gt_set_wedged(gt);
goto out_reset;
}
out_reset:
- igt_global_reset_lock(i915);
- fake_hangcheck(rq->i915, rq->engine->mask);
- igt_global_reset_unlock(i915);
+ igt_global_reset_lock(gt);
+ fake_hangcheck(gt, rq->engine->mask);
+ igt_global_reset_unlock(gt);
if (tsk) {
- struct igt_wedge_me w;
+ struct intel_wedge_me w;
/* The reset, even indirectly, should take less than 10ms. */
- igt_wedge_on_timeout(&w, i915, HZ / 10 /* 100ms timeout*/)
+ intel_wedge_on_timeout(&w, gt, HZ / 10 /* 100ms */)
err = kthread_stop(tsk);
put_task_struct(tsk);
}
- mutex_lock(&i915->drm.struct_mutex);
+ mutex_lock(&gt->i915->drm.struct_mutex);
out_rq:
i915_request_put(rq);
out_obj:
@@ -1316,9 +1320,9 @@ out_obj:
fini:
hang_fini(&h);
unlock:
- mutex_unlock(&i915->drm.struct_mutex);
+ mutex_unlock(&gt->i915->drm.struct_mutex);
- if (i915_reset_failed(i915))
+ if (intel_gt_is_wedged(gt))
return -EIO;
return err;
@@ -1326,26 +1330,26 @@ unlock:
static int igt_reset_evict_ggtt(void *arg)
{
- struct drm_i915_private *i915 = arg;
+ struct intel_gt *gt = arg;
- return __igt_reset_evict_vma(i915, &i915->ggtt.vm,
+ return __igt_reset_evict_vma(gt, &gt->ggtt->vm,
evict_vma, EXEC_OBJECT_WRITE);
}
static int igt_reset_evict_ppgtt(void *arg)
{
- struct drm_i915_private *i915 = arg;
+ struct intel_gt *gt = arg;
struct i915_gem_context *ctx;
struct drm_file *file;
int err;
- file = mock_file(i915);
+ file = mock_file(gt->i915);
if (IS_ERR(file))
return PTR_ERR(file);
- mutex_lock(&i915->drm.struct_mutex);
- ctx = live_context(i915, file);
- mutex_unlock(&i915->drm.struct_mutex);
+ mutex_lock(&gt->i915->drm.struct_mutex);
+ ctx = live_context(gt->i915, file);
+ mutex_unlock(&gt->i915->drm.struct_mutex);
if (IS_ERR(ctx)) {
err = PTR_ERR(ctx);
goto out;
@@ -1353,29 +1357,29 @@ static int igt_reset_evict_ppgtt(void *arg)
err = 0;
if (ctx->vm) /* aliasing == global gtt locking, covered above */
- err = __igt_reset_evict_vma(i915, ctx->vm,
+ err = __igt_reset_evict_vma(gt, ctx->vm,
evict_vma, EXEC_OBJECT_WRITE);
out:
- mock_file_free(i915, file);
+ mock_file_free(gt->i915, file);
return err;
}
static int igt_reset_evict_fence(void *arg)
{
- struct drm_i915_private *i915 = arg;
+ struct intel_gt *gt = arg;
- return __igt_reset_evict_vma(i915, &i915->ggtt.vm,
+ return __igt_reset_evict_vma(gt, &gt->ggtt->vm,
evict_fence, EXEC_OBJECT_NEEDS_FENCE);
}
-static int wait_for_others(struct drm_i915_private *i915,
+static int wait_for_others(struct intel_gt *gt,
struct intel_engine_cs *exclude)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
- for_each_engine(engine, i915, id) {
+ for_each_engine(engine, gt->i915, id) {
if (engine == exclude)
continue;
@@ -1388,7 +1392,8 @@ static int wait_for_others(struct drm_i915_private *i915,
static int igt_reset_queue(void *arg)
{
- struct drm_i915_private *i915 = arg;
+ struct intel_gt *gt = arg;
+ struct i915_gpu_error *global = &gt->i915->gpu_error;
struct intel_engine_cs *engine;
enum intel_engine_id id;
struct hang h;
@@ -1396,14 +1401,14 @@ static int igt_reset_queue(void *arg)
/* Check that we replay pending requests following a hang */
- igt_global_reset_lock(i915);
+ igt_global_reset_lock(gt);
- mutex_lock(&i915->drm.struct_mutex);
- err = hang_init(&h, i915);
+ mutex_lock(&gt->i915->drm.struct_mutex);
+ err = hang_init(&h, gt);
if (err)
goto unlock;
- for_each_engine(engine, i915, id) {
+ for_each_engine(engine, gt->i915, id) {
struct i915_request *prev;
IGT_TIMEOUT(end_time);
unsigned int count;
@@ -1444,7 +1449,7 @@ static int igt_reset_queue(void *arg)
* (hangcheck), or we focus on resetting just one
* engine and so avoid repeatedly resetting innocents.
*/
- err = wait_for_others(i915, engine);
+ err = wait_for_others(gt, engine);
if (err) {
pr_err("%s(%s): Failed to idle other inactive engines after device reset\n",
__func__, engine->name);
@@ -1452,12 +1457,12 @@ static int igt_reset_queue(void *arg)
i915_request_put(prev);
GEM_TRACE_DUMP();
- i915_gem_set_wedged(i915);
+ intel_gt_set_wedged(gt);
goto fini;
}
if (!wait_until_running(&h, prev)) {
- struct drm_printer p = drm_info_printer(i915->drm.dev);
+ struct drm_printer p = drm_info_printer(gt->i915->drm.dev);
pr_err("%s(%s): Failed to start request %llx, at %x\n",
__func__, engine->name,
@@ -1468,13 +1473,13 @@ static int igt_reset_queue(void *arg)
i915_request_put(rq);
i915_request_put(prev);
- i915_gem_set_wedged(i915);
+ intel_gt_set_wedged(gt);
err = -EIO;
goto fini;
}
- reset_count = fake_hangcheck(i915, BIT(id));
+ reset_count = fake_hangcheck(gt, BIT(id));
if (prev->fence.error != -EIO) {
pr_err("GPU reset not recorded on hanging request [fence.error=%d]!\n",
@@ -1494,7 +1499,7 @@ static int igt_reset_queue(void *arg)
goto fini;
}
- if (i915_reset_count(&i915->gpu_error) == reset_count) {
+ if (i915_reset_count(global) == reset_count) {
pr_err("No GPU reset recorded!\n");
i915_request_put(rq);
i915_request_put(prev);
@@ -1509,11 +1514,11 @@ static int igt_reset_queue(void *arg)
pr_info("%s: Completed %d resets\n", engine->name, count);
*h.batch = MI_BATCH_BUFFER_END;
- i915_gem_chipset_flush(i915);
+ intel_gt_chipset_flush(engine->gt);
i915_request_put(prev);
- err = igt_flush_test(i915, I915_WAIT_LOCKED);
+ err = igt_flush_test(gt->i915, I915_WAIT_LOCKED);
if (err)
break;
}
@@ -1521,10 +1526,10 @@ static int igt_reset_queue(void *arg)
fini:
hang_fini(&h);
unlock:
- mutex_unlock(&i915->drm.struct_mutex);
- igt_global_reset_unlock(i915);
+ mutex_unlock(&gt->i915->drm.struct_mutex);
+ igt_global_reset_unlock(gt);
- if (i915_reset_failed(i915))
+ if (intel_gt_is_wedged(gt))
return -EIO;
return err;
@@ -1532,8 +1537,9 @@ unlock:
static int igt_handle_error(void *arg)
{
- struct drm_i915_private *i915 = arg;
- struct intel_engine_cs *engine = i915->engine[RCS0];
+ struct intel_gt *gt = arg;
+ struct i915_gpu_error *global = &gt->i915->gpu_error;
+ struct intel_engine_cs *engine = gt->i915->engine[RCS0];
struct hang h;
struct i915_request *rq;
struct i915_gpu_state *error;
@@ -1541,15 +1547,15 @@ static int igt_handle_error(void *arg)
/* Check that we can issue a global GPU and engine reset */
- if (!intel_has_reset_engine(i915))
+ if (!intel_has_reset_engine(gt->i915))
return 0;
if (!engine || !intel_engine_can_store_dword(engine))
return 0;
- mutex_lock(&i915->drm.struct_mutex);
+ mutex_lock(&gt->i915->drm.struct_mutex);
- err = hang_init(&h, i915);
+ err = hang_init(&h, gt);
if (err)
goto err_unlock;
@@ -1563,28 +1569,28 @@ static int igt_handle_error(void *arg)
i915_request_add(rq);
if (!wait_until_running(&h, rq)) {
- struct drm_printer p = drm_info_printer(i915->drm.dev);
+ struct drm_printer p = drm_info_printer(gt->i915->drm.dev);
pr_err("%s: Failed to start request %llx, at %x\n",
__func__, rq->fence.seqno, hws_seqno(&h, rq));
intel_engine_dump(rq->engine, &p, "%s\n", rq->engine->name);
- i915_gem_set_wedged(i915);
+ intel_gt_set_wedged(gt);
err = -EIO;
goto err_request;
}
- mutex_unlock(&i915->drm.struct_mutex);
+ mutex_unlock(&gt->i915->drm.struct_mutex);
/* Temporarily disable error capture */
- error = xchg(&i915->gpu_error.first_error, (void *)-1);
+ error = xchg(&global->first_error, (void *)-1);
- i915_handle_error(i915, engine->mask, 0, NULL);
+ intel_gt_handle_error(gt, engine->mask, 0, NULL);
- xchg(&i915->gpu_error.first_error, error);
+ xchg(&global->first_error, error);
- mutex_lock(&i915->drm.struct_mutex);
+ mutex_lock(&gt->i915->drm.struct_mutex);
if (rq->fence.error != -EIO) {
pr_err("Guilty request not identified!\n");
@@ -1597,7 +1603,7 @@ err_request:
err_fini:
hang_fini(&h);
err_unlock:
- mutex_unlock(&i915->drm.struct_mutex);
+ mutex_unlock(&gt->i915->drm.struct_mutex);
return err;
}
@@ -1614,7 +1620,7 @@ static int __igt_atomic_reset_engine(struct intel_engine_cs *engine,
tasklet_disable_nosync(t);
p->critical_section_begin();
- err = i915_reset_engine(engine, NULL);
+ err = intel_engine_reset(engine, NULL);
p->critical_section_end();
tasklet_enable(t);
@@ -1629,7 +1635,6 @@ static int __igt_atomic_reset_engine(struct intel_engine_cs *engine,
static int igt_atomic_reset_engine(struct intel_engine_cs *engine,
const struct igt_atomic_section *p)
{
- struct drm_i915_private *i915 = engine->i915;
struct i915_request *rq;
struct hang h;
int err;
@@ -1638,7 +1643,7 @@ static int igt_atomic_reset_engine(struct intel_engine_cs *engine,
if (err)
return err;
- err = hang_init(&h, i915);
+ err = hang_init(&h, engine->gt);
if (err)
return err;
@@ -1657,16 +1662,16 @@ static int igt_atomic_reset_engine(struct intel_engine_cs *engine,
pr_err("%s(%s): Failed to start request %llx, at %x\n",
__func__, engine->name,
rq->fence.seqno, hws_seqno(&h, rq));
- i915_gem_set_wedged(i915);
+ intel_gt_set_wedged(engine->gt);
err = -EIO;
}
if (err == 0) {
- struct igt_wedge_me w;
+ struct intel_wedge_me w;
- igt_wedge_on_timeout(&w, i915, HZ / 20 /* 50ms timeout*/)
+ intel_wedge_on_timeout(&w, engine->gt, HZ / 20 /* 50ms */)
i915_request_wait(rq, 0, MAX_SCHEDULE_TIMEOUT);
- if (i915_reset_failed(i915))
+ if (intel_gt_is_wedged(engine->gt))
err = -EIO;
}
@@ -1678,30 +1683,30 @@ out:
static int igt_reset_engines_atomic(void *arg)
{
- struct drm_i915_private *i915 = arg;
+ struct intel_gt *gt = arg;
const typeof(*igt_atomic_phases) *p;
int err = 0;
/* Check that the engines resets are usable from atomic context */
- if (!intel_has_reset_engine(i915))
+ if (!intel_has_reset_engine(gt->i915))
return 0;
- if (USES_GUC_SUBMISSION(i915))
+ if (USES_GUC_SUBMISSION(gt->i915))
return 0;
- igt_global_reset_lock(i915);
- mutex_lock(&i915->drm.struct_mutex);
+ igt_global_reset_lock(gt);
+ mutex_lock(&gt->i915->drm.struct_mutex);
/* Flush any requests before we get started and check basics */
- if (!igt_force_reset(i915))
+ if (!igt_force_reset(gt))
goto unlock;
for (p = igt_atomic_phases; p->name; p++) {
struct intel_engine_cs *engine;
enum intel_engine_id id;
- for_each_engine(engine, i915, id) {
+ for_each_engine(engine, gt->i915, id) {
err = igt_atomic_reset_engine(engine, p);
if (err)
goto out;
@@ -1710,11 +1715,11 @@ static int igt_reset_engines_atomic(void *arg)
out:
/* As we poke around the guts, do a full reset before continuing. */
- igt_force_reset(i915);
+ igt_force_reset(gt);
unlock:
- mutex_unlock(&i915->drm.struct_mutex);
- igt_global_reset_unlock(i915);
+ mutex_unlock(&gt->i915->drm.struct_mutex);
+ igt_global_reset_unlock(gt);
return err;
}
@@ -1736,28 +1741,29 @@ int intel_hangcheck_live_selftests(struct drm_i915_private *i915)
SUBTEST(igt_reset_evict_fence),
SUBTEST(igt_handle_error),
};
+ struct intel_gt *gt = &i915->gt;
intel_wakeref_t wakeref;
bool saved_hangcheck;
int err;
- if (!intel_has_gpu_reset(i915))
+ if (!intel_has_gpu_reset(gt->i915))
return 0;
- if (i915_terminally_wedged(i915))
+ if (intel_gt_is_wedged(gt))
return -EIO; /* we're long past hope of a successful reset */
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
+ wakeref = intel_runtime_pm_get(&gt->i915->runtime_pm);
saved_hangcheck = fetch_and_zero(&i915_modparams.enable_hangcheck);
- drain_delayed_work(&i915->gpu_error.hangcheck_work); /* flush param */
+ drain_delayed_work(&gt->hangcheck.work); /* flush param */
- err = i915_subtests(tests, i915);
+ err = intel_gt_live_subtests(tests, gt);
- mutex_lock(&i915->drm.struct_mutex);
- igt_flush_test(i915, I915_WAIT_LOCKED);
- mutex_unlock(&i915->drm.struct_mutex);
+ mutex_lock(&gt->i915->drm.struct_mutex);
+ igt_flush_test(gt->i915, I915_WAIT_LOCKED);
+ mutex_unlock(&gt->i915->drm.struct_mutex);
i915_modparams.enable_hangcheck = saved_hangcheck;
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
+ intel_runtime_pm_put(&gt->i915->runtime_pm, wakeref);
return err;
}
diff --git a/drivers/gpu/drm/i915/gt/selftest_lrc.c b/drivers/gpu/drm/i915/gt/selftest_lrc.c
index 401e8b539297..d791158988d6 100644
--- a/drivers/gpu/drm/i915/gt/selftest_lrc.c
+++ b/drivers/gpu/drm/i915/gt/selftest_lrc.c
@@ -22,9 +22,9 @@
static int live_sanitycheck(void *arg)
{
struct drm_i915_private *i915 = arg;
- struct intel_engine_cs *engine;
+ struct i915_gem_engines_iter it;
struct i915_gem_context *ctx;
- enum intel_engine_id id;
+ struct intel_context *ce;
struct igt_spinner spin;
intel_wakeref_t wakeref;
int err = -ENOMEM;
@@ -35,17 +35,17 @@ static int live_sanitycheck(void *arg)
mutex_lock(&i915->drm.struct_mutex);
wakeref = intel_runtime_pm_get(&i915->runtime_pm);
- if (igt_spinner_init(&spin, i915))
+ if (igt_spinner_init(&spin, &i915->gt))
goto err_unlock;
ctx = kernel_context(i915);
if (!ctx)
goto err_spin;
- for_each_engine(engine, i915, id) {
+ for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
struct i915_request *rq;
- rq = igt_spinner_create_request(&spin, ctx, engine, MI_NOOP);
+ rq = igt_spinner_create_request(&spin, ce, MI_NOOP);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto err_ctx;
@@ -55,7 +55,7 @@ static int live_sanitycheck(void *arg)
if (!igt_wait_for_spinner(&spin, rq)) {
GEM_TRACE("spinner failed to start\n");
GEM_TRACE_DUMP();
- i915_gem_set_wedged(i915);
+ intel_gt_set_wedged(&i915->gt);
err = -EIO;
goto err_ctx;
}
@@ -69,16 +69,236 @@ static int live_sanitycheck(void *arg)
err = 0;
err_ctx:
+ i915_gem_context_unlock_engines(ctx);
kernel_context_close(ctx);
err_spin:
igt_spinner_fini(&spin);
err_unlock:
- igt_flush_test(i915, I915_WAIT_LOCKED);
intel_runtime_pm_put(&i915->runtime_pm, wakeref);
mutex_unlock(&i915->drm.struct_mutex);
return err;
}
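+/*
+ * Each request in the chain spins on its own slot of the shared GGTT
+ * buffer (MI_SEMAPHORE_WAIT polling for a non-zero dword) and, once
+ * released, writes the slot of its predecessor, releasing it in turn.
+ * Arbitration is enabled around the wait so the scheduler is free to
+ * preempt the spinner and timeslice between the queued requests.
+ */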
+static int
+emit_semaphore_chain(struct i915_request *rq, struct i915_vma *vma, int idx)
+{
+ u32 *cs;
+
+ cs = intel_ring_begin(rq, 10);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
+
+ *cs++ = MI_SEMAPHORE_WAIT |
+ MI_SEMAPHORE_GLOBAL_GTT |
+ MI_SEMAPHORE_POLL |
+ MI_SEMAPHORE_SAD_NEQ_SDD;
+ *cs++ = 0;
+ *cs++ = i915_ggtt_offset(vma) + 4 * idx;
+ *cs++ = 0;
+
+ if (idx > 0) {
+ *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
+ *cs++ = i915_ggtt_offset(vma) + 4 * (idx - 1);
+ *cs++ = 0;
+ *cs++ = 1;
+ } else {
+ *cs++ = MI_NOOP;
+ *cs++ = MI_NOOP;
+ *cs++ = MI_NOOP;
+ *cs++ = MI_NOOP;
+ }
+
+ *cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
+
+ intel_ring_advance(rq, cs);
+ return 0;
+}
+
+static struct i915_request *
+semaphore_queue(struct intel_engine_cs *engine, struct i915_vma *vma, int idx)
+{
+ struct i915_gem_context *ctx;
+ struct i915_request *rq;
+ int err;
+
+ ctx = kernel_context(engine->i915);
+ if (!ctx)
+ return ERR_PTR(-ENOMEM);
+
+ rq = igt_request_alloc(ctx, engine);
+ if (IS_ERR(rq))
+ goto out_ctx;
+
+ err = emit_semaphore_chain(rq, vma, idx);
+ i915_request_add(rq);
+ if (err)
+ rq = ERR_PTR(err);
+
+out_ctx:
+ kernel_context_close(ctx);
+ return rq;
+}
+
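+/*
+ * Unwind the chain: from the engine's kernel context, write the last
+ * semaphore slot and schedule the write at maximum user priority so
+ * the scheduler re-evaluates the queues and starts timeslicing the
+ * spinners.
+ */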
+static int
+release_queue(struct intel_engine_cs *engine,
+ struct i915_vma *vma,
+ int idx)
+{
+ struct i915_sched_attr attr = {
+ .priority = I915_USER_PRIORITY(I915_PRIORITY_MAX),
+ };
+ struct i915_request *rq;
+ u32 *cs;
+
+ rq = i915_request_create(engine->kernel_context);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
+
+ cs = intel_ring_begin(rq, 4);
+ if (IS_ERR(cs)) {
+ i915_request_add(rq);
+ return PTR_ERR(cs);
+ }
+
+ *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
+ *cs++ = i915_ggtt_offset(vma) + 4 * (idx - 1);
+ *cs++ = 0;
+ *cs++ = 1;
+
+ intel_ring_advance(rq, cs);
+ i915_request_add(rq);
+
+ engine->schedule(rq, &attr);
+
+ return 0;
+}
+
+static int
+slice_semaphore_queue(struct intel_engine_cs *outer,
+ struct i915_vma *vma,
+ int count)
+{
+ struct intel_engine_cs *engine;
+ struct i915_request *head;
+ enum intel_engine_id id;
+ int err, i, n = 0;
+
+ head = semaphore_queue(outer, vma, n++);
+ if (IS_ERR(head))
+ return PTR_ERR(head);
+
+ i915_request_get(head);
+ for_each_engine(engine, outer->i915, id) {
+ for (i = 0; i < count; i++) {
+ struct i915_request *rq;
+
+ rq = semaphore_queue(engine, vma, n++);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto out;
+ }
+ }
+ }
+
+ err = release_queue(outer, vma, n);
+ if (err)
+ goto out;
+
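+ /*
+ * The head request can only complete once every link of the chain
+ * has been timesliced in, so scale the wait roughly with the number
+ * of semaphore hops across all engines.
+ */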
+ if (i915_request_wait(head,
+ I915_WAIT_LOCKED,
+ 2 * RUNTIME_INFO(outer->i915)->num_engines * (count + 2) * (count + 3)) < 0) {
+ pr_err("Failed to slice along semaphore chain of length (%d, %d)!\n",
+ count, n);
+ GEM_TRACE_DUMP();
+ intel_gt_set_wedged(outer->gt);
+ err = -EIO;
+ }
+
+out:
+ i915_request_put(head);
+ return err;
+}
+
+static int live_timeslice_preempt(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct drm_i915_gem_object *obj;
+ intel_wakeref_t wakeref;
+ struct i915_vma *vma;
+ void *vaddr;
+ int err = 0;
+ int count;
+
+ /*
+ * If a request takes too long, we would like to give other users
+ * a fair go on the GPU. In particular, users may create batches
+ * that wait upon external input, where that input may even be
+ * supplied by another GPU job. To avoid blocking forever, we
+ * need to preempt the current task and replace it with another
+ * ready task.
+ */
+
+ mutex_lock(&i915->drm.struct_mutex);
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
+
+ obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
+ if (IS_ERR(obj)) {
+ err = PTR_ERR(obj);
+ goto err_unlock;
+ }
+
+ vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto err_obj;
+ }
+
+ vaddr = i915_gem_object_pin_map(obj, I915_MAP_WC);
+ if (IS_ERR(vaddr)) {
+ err = PTR_ERR(vaddr);
+ goto err_obj;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
+ if (err)
+ goto err_map;
+
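+ /*
+ * For a spread of chain lengths, queue a semaphore chain on each
+ * engine that supports preemption; the chain can only unwind if
+ * timeslicing kicks in while the head request is still spinning.
+ */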
+ for_each_prime_number_from(count, 1, 16) {
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ for_each_engine(engine, i915, id) {
+ if (!intel_engine_has_preemption(engine))
+ continue;
+
+ memset(vaddr, 0, PAGE_SIZE);
+
+ err = slice_semaphore_queue(engine, vma, count);
+ if (err)
+ goto err_pin;
+
+ if (igt_flush_test(i915, I915_WAIT_LOCKED)) {
+ err = -EIO;
+ goto err_pin;
+ }
+ }
+ }
+
+err_pin:
+ i915_vma_unpin(vma);
+err_map:
+ i915_gem_object_unpin_map(obj);
+err_obj:
+ i915_gem_object_put(obj);
+err_unlock:
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
+ mutex_unlock(&i915->drm.struct_mutex);
+
+ return err;
+}
+
static int live_busywait_preempt(void *arg)
{
struct drm_i915_private *i915 = arg;
@@ -138,6 +358,9 @@ static int live_busywait_preempt(void *arg)
struct igt_live_test t;
u32 *cs;
+ if (!intel_engine_has_preemption(engine))
+ continue;
+
if (!intel_engine_can_store_dword(engine))
continue;
@@ -229,7 +452,7 @@ static int live_busywait_preempt(void *arg)
intel_engine_dump(engine, &p, "%s\n", engine->name);
GEM_TRACE_DUMP();
- i915_gem_set_wedged(i915);
+ intel_gt_set_wedged(&i915->gt);
err = -EIO;
goto err_vma;
}
@@ -253,13 +476,29 @@ err_ctx_lo:
err_ctx_hi:
kernel_context_close(ctx_hi);
err_unlock:
- if (igt_flush_test(i915, I915_WAIT_LOCKED))
- err = -EIO;
intel_runtime_pm_put(&i915->runtime_pm, wakeref);
mutex_unlock(&i915->drm.struct_mutex);
return err;
}
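+/*
+ * Bridge the ctx+engine interface to the spinner's intel_context one:
+ * look up the context's engine by its legacy ring index, build the
+ * spinning request on it and drop the reference again.
+ */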
+static struct i915_request *
+spinner_create_request(struct igt_spinner *spin,
+ struct i915_gem_context *ctx,
+ struct intel_engine_cs *engine,
+ u32 arb)
+{
+ struct intel_context *ce;
+ struct i915_request *rq;
+
+ ce = i915_gem_context_get_engine(ctx, engine->legacy_idx);
+ if (IS_ERR(ce))
+ return ERR_CAST(ce);
+
+ rq = igt_spinner_create_request(spin, ce, arb);
+ intel_context_put(ce);
+ return rq;
+}
+
static int live_preempt(void *arg)
{
struct drm_i915_private *i915 = arg;
@@ -279,10 +518,10 @@ static int live_preempt(void *arg)
mutex_lock(&i915->drm.struct_mutex);
wakeref = intel_runtime_pm_get(&i915->runtime_pm);
- if (igt_spinner_init(&spin_hi, i915))
+ if (igt_spinner_init(&spin_hi, &i915->gt))
goto err_unlock;
- if (igt_spinner_init(&spin_lo, i915))
+ if (igt_spinner_init(&spin_lo, &i915->gt))
goto err_spin_hi;
ctx_hi = kernel_context(i915);
@@ -309,8 +548,8 @@ static int live_preempt(void *arg)
goto err_ctx_lo;
}
- rq = igt_spinner_create_request(&spin_lo, ctx_lo, engine,
- MI_ARB_CHECK);
+ rq = spinner_create_request(&spin_lo, ctx_lo, engine,
+ MI_ARB_CHECK);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto err_ctx_lo;
@@ -320,13 +559,13 @@ static int live_preempt(void *arg)
if (!igt_wait_for_spinner(&spin_lo, rq)) {
GEM_TRACE("lo spinner failed to start\n");
GEM_TRACE_DUMP();
- i915_gem_set_wedged(i915);
+ intel_gt_set_wedged(&i915->gt);
err = -EIO;
goto err_ctx_lo;
}
- rq = igt_spinner_create_request(&spin_hi, ctx_hi, engine,
- MI_ARB_CHECK);
+ rq = spinner_create_request(&spin_hi, ctx_hi, engine,
+ MI_ARB_CHECK);
if (IS_ERR(rq)) {
igt_spinner_end(&spin_lo);
err = PTR_ERR(rq);
@@ -337,7 +576,7 @@ static int live_preempt(void *arg)
if (!igt_wait_for_spinner(&spin_hi, rq)) {
GEM_TRACE("hi spinner failed to start\n");
GEM_TRACE_DUMP();
- i915_gem_set_wedged(i915);
+ intel_gt_set_wedged(&i915->gt);
err = -EIO;
goto err_ctx_lo;
}
@@ -361,7 +600,6 @@ err_spin_lo:
err_spin_hi:
igt_spinner_fini(&spin_hi);
err_unlock:
- igt_flush_test(i915, I915_WAIT_LOCKED);
intel_runtime_pm_put(&i915->runtime_pm, wakeref);
mutex_unlock(&i915->drm.struct_mutex);
return err;
@@ -384,10 +622,10 @@ static int live_late_preempt(void *arg)
mutex_lock(&i915->drm.struct_mutex);
wakeref = intel_runtime_pm_get(&i915->runtime_pm);
- if (igt_spinner_init(&spin_hi, i915))
+ if (igt_spinner_init(&spin_hi, &i915->gt))
goto err_unlock;
- if (igt_spinner_init(&spin_lo, i915))
+ if (igt_spinner_init(&spin_lo, &i915->gt))
goto err_spin_hi;
ctx_hi = kernel_context(i915);
@@ -398,6 +636,9 @@ static int live_late_preempt(void *arg)
if (!ctx_lo)
goto err_ctx_hi;
+ /* Make sure ctx_lo stays before ctx_hi until we trigger preemption. */
+ ctx_lo->sched.priority = I915_USER_PRIORITY(1);
+
for_each_engine(engine, i915, id) {
struct igt_live_test t;
struct i915_request *rq;
@@ -410,8 +651,8 @@ static int live_late_preempt(void *arg)
goto err_ctx_lo;
}
- rq = igt_spinner_create_request(&spin_lo, ctx_lo, engine,
- MI_ARB_CHECK);
+ rq = spinner_create_request(&spin_lo, ctx_lo, engine,
+ MI_ARB_CHECK);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto err_ctx_lo;
@@ -423,8 +664,8 @@ static int live_late_preempt(void *arg)
goto err_wedged;
}
- rq = igt_spinner_create_request(&spin_hi, ctx_hi, engine,
- MI_NOOP);
+ rq = spinner_create_request(&spin_hi, ctx_hi, engine,
+ MI_NOOP);
if (IS_ERR(rq)) {
igt_spinner_end(&spin_lo);
err = PTR_ERR(rq);
@@ -465,7 +706,6 @@ err_spin_lo:
err_spin_hi:
igt_spinner_fini(&spin_hi);
err_unlock:
- igt_flush_test(i915, I915_WAIT_LOCKED);
intel_runtime_pm_put(&i915->runtime_pm, wakeref);
mutex_unlock(&i915->drm.struct_mutex);
return err;
@@ -473,7 +713,7 @@ err_unlock:
err_wedged:
igt_spinner_end(&spin_hi);
igt_spinner_end(&spin_lo);
- i915_gem_set_wedged(i915);
+ intel_gt_set_wedged(&i915->gt);
err = -EIO;
goto err_ctx_lo;
}
@@ -490,7 +730,7 @@ static int preempt_client_init(struct drm_i915_private *i915,
if (!c->ctx)
return -ENOMEM;
- if (igt_spinner_init(&c->spin, i915))
+ if (igt_spinner_init(&c->spin, &i915->gt))
goto err_ctx;
return 0;
@@ -506,6 +746,114 @@ static void preempt_client_fini(struct preempt_client *c)
kernel_context_close(c->ctx);
}
+static int live_nopreempt(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct intel_engine_cs *engine;
+ struct preempt_client a, b;
+ enum intel_engine_id id;
+ intel_wakeref_t wakeref;
+ int err = -ENOMEM;
+
+ /*
+ * Verify that we can disable preemption for an individual request
+ * that may be under observation and does not want to be interrupted.
+ */
+
+ if (!HAS_LOGICAL_RING_PREEMPTION(i915))
+ return 0;
+
+ mutex_lock(&i915->drm.struct_mutex);
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
+
+ if (preempt_client_init(i915, &a))
+ goto err_unlock;
+ if (preempt_client_init(i915, &b))
+ goto err_client_a;
+ b.ctx->sched.priority = I915_USER_PRIORITY(I915_PRIORITY_MAX);
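+ /* Client B outranks A, so only NOPREEMPT can keep A on the engine. */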
+
+ for_each_engine(engine, i915, id) {
+ struct i915_request *rq_a, *rq_b;
+
+ if (!intel_engine_has_preemption(engine))
+ continue;
+
+ engine->execlists.preempt_hang.count = 0;
+
+ rq_a = spinner_create_request(&a.spin,
+ a.ctx, engine,
+ MI_ARB_CHECK);
+ if (IS_ERR(rq_a)) {
+ err = PTR_ERR(rq_a);
+ goto err_client_b;
+ }
+
+ /* Low priority client, but unpreemptable! */
+ rq_a->flags |= I915_REQUEST_NOPREEMPT;
+
+ i915_request_add(rq_a);
+ if (!igt_wait_for_spinner(&a.spin, rq_a)) {
+ pr_err("First client failed to start\n");
+ goto err_wedged;
+ }
+
+ rq_b = spinner_create_request(&b.spin,
+ b.ctx, engine,
+ MI_ARB_CHECK);
+ if (IS_ERR(rq_b)) {
+ err = PTR_ERR(rq_b);
+ goto err_client_b;
+ }
+
+ i915_request_add(rq_b);
+
+ /* B is much more important than A! (But A is unpreemptable.) */
+ GEM_BUG_ON(rq_prio(rq_b) <= rq_prio(rq_a));
+
+ /* Wait long enough for preemption and timeslicing */
+ if (igt_wait_for_spinner(&b.spin, rq_b)) {
+ pr_err("Second client started too early!\n");
+ goto err_wedged;
+ }
+
+ igt_spinner_end(&a.spin);
+
+ if (!igt_wait_for_spinner(&b.spin, rq_b)) {
+ pr_err("Second client failed to start\n");
+ goto err_wedged;
+ }
+
+ igt_spinner_end(&b.spin);
+
+ if (engine->execlists.preempt_hang.count) {
+ pr_err("Preemption recorded x%d; should have been suppressed!\n",
+ engine->execlists.preempt_hang.count);
+ err = -EINVAL;
+ goto err_wedged;
+ }
+
+ if (igt_flush_test(i915, I915_WAIT_LOCKED))
+ goto err_wedged;
+ }
+
+ err = 0;
+err_client_b:
+ preempt_client_fini(&b);
+err_client_a:
+ preempt_client_fini(&a);
+err_unlock:
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
+ mutex_unlock(&i915->drm.struct_mutex);
+ return err;
+
+err_wedged:
+ igt_spinner_end(&b.spin);
+ igt_spinner_end(&a.spin);
+ intel_gt_set_wedged(&i915->gt);
+ err = -EIO;
+ goto err_client_b;
+}
+
static int live_suppress_self_preempt(void *arg)
{
struct drm_i915_private *i915 = arg;
@@ -531,6 +879,9 @@ static int live_suppress_self_preempt(void *arg)
if (USES_GUC_SUBMISSION(i915))
return 0; /* presume black box */
+ if (intel_vgpu_active(i915))
+ return 0; /* GVT forces single port & request submission */
+
mutex_lock(&i915->drm.struct_mutex);
wakeref = intel_runtime_pm_get(&i915->runtime_pm);
@@ -548,9 +899,9 @@ static int live_suppress_self_preempt(void *arg)
engine->execlists.preempt_hang.count = 0;
- rq_a = igt_spinner_create_request(&a.spin,
- a.ctx, engine,
- MI_NOOP);
+ rq_a = spinner_create_request(&a.spin,
+ a.ctx, engine,
+ MI_NOOP);
if (IS_ERR(rq_a)) {
err = PTR_ERR(rq_a);
goto err_client_b;
@@ -562,10 +913,12 @@ static int live_suppress_self_preempt(void *arg)
goto err_wedged;
}
+ /* Keep postponing the timer to avoid premature slicing */
+ mod_timer(&engine->execlists.timer, jiffies + HZ);
for (depth = 0; depth < 8; depth++) {
- rq_b = igt_spinner_create_request(&b.spin,
- b.ctx, engine,
- MI_NOOP);
+ rq_b = spinner_create_request(&b.spin,
+ b.ctx, engine,
+ MI_NOOP);
if (IS_ERR(rq_b)) {
err = PTR_ERR(rq_b);
goto err_client_b;
@@ -587,7 +940,8 @@ static int live_suppress_self_preempt(void *arg)
igt_spinner_end(&a.spin);
if (engine->execlists.preempt_hang.count) {
- pr_err("Preemption recorded x%d, depth %d; should have been suppressed!\n",
+ pr_err("Preemption on %s recorded x%d, depth %d; should have been suppressed!\n",
+ engine->name,
engine->execlists.preempt_hang.count,
depth);
err = -EINVAL;
@@ -604,8 +958,6 @@ err_client_b:
err_client_a:
preempt_client_fini(&a);
err_unlock:
- if (igt_flush_test(i915, I915_WAIT_LOCKED))
- err = -EIO;
intel_runtime_pm_put(&i915->runtime_pm, wakeref);
mutex_unlock(&i915->drm.struct_mutex);
return err;
@@ -613,7 +965,7 @@ err_unlock:
err_wedged:
igt_spinner_end(&b.spin);
igt_spinner_end(&a.spin);
- i915_gem_set_wedged(i915);
+ intel_gt_set_wedged(&i915->gt);
err = -EIO;
goto err_client_b;
}
@@ -646,6 +998,10 @@ static struct i915_request *dummy_request(struct intel_engine_cs *engine)
i915_sw_fence_init(&rq->submit, dummy_notify);
set_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags);
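+ /*
+ * Initialise just enough of the embedded fence for the signaling
+ * paths to poke at: its lock and an empty callback list. The dummy
+ * request is only fed to the scheduler, never executed.
+ */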
+ spin_lock_init(&rq->lock);
+ rq->fence.lock = &rq->lock;
+ INIT_LIST_HEAD(&rq->fence.cb_list);
+
return rq;
}
@@ -714,9 +1070,9 @@ static int live_suppress_wait_preempt(void *arg)
goto err_client_3;
for (i = 0; i < ARRAY_SIZE(client); i++) {
- rq[i] = igt_spinner_create_request(&client[i].spin,
- client[i].ctx, engine,
- MI_NOOP);
+ rq[i] = spinner_create_request(&client[i].spin,
+ client[i].ctx, engine,
+ MI_NOOP);
if (IS_ERR(rq[i])) {
err = PTR_ERR(rq[i]);
goto err_wedged;
@@ -773,8 +1129,6 @@ err_client_1:
err_client_0:
preempt_client_fini(&client[0]);
err_unlock:
- if (igt_flush_test(i915, I915_WAIT_LOCKED))
- err = -EIO;
intel_runtime_pm_put(&i915->runtime_pm, wakeref);
mutex_unlock(&i915->drm.struct_mutex);
return err;
@@ -782,7 +1136,7 @@ err_unlock:
err_wedged:
for (i = 0; i < ARRAY_SIZE(client); i++)
igt_spinner_end(&client[i].spin);
- i915_gem_set_wedged(i915);
+ intel_gt_set_wedged(&i915->gt);
err = -EIO;
goto err_client_3;
}
@@ -825,9 +1179,9 @@ static int live_chain_preempt(void *arg)
if (!intel_engine_has_preemption(engine))
continue;
- rq = igt_spinner_create_request(&lo.spin,
- lo.ctx, engine,
- MI_ARB_CHECK);
+ rq = spinner_create_request(&lo.spin,
+ lo.ctx, engine,
+ MI_ARB_CHECK);
if (IS_ERR(rq))
goto err_wedged;
i915_request_add(rq);
@@ -851,18 +1205,18 @@ static int live_chain_preempt(void *arg)
}
for_each_prime_number_from(count, 1, ring_size) {
- rq = igt_spinner_create_request(&hi.spin,
- hi.ctx, engine,
- MI_ARB_CHECK);
+ rq = spinner_create_request(&hi.spin,
+ hi.ctx, engine,
+ MI_ARB_CHECK);
if (IS_ERR(rq))
goto err_wedged;
i915_request_add(rq);
if (!igt_wait_for_spinner(&hi.spin, rq))
goto err_wedged;
- rq = igt_spinner_create_request(&lo.spin,
- lo.ctx, engine,
- MI_ARB_CHECK);
+ rq = spinner_create_request(&lo.spin,
+ lo.ctx, engine,
+ MI_ARB_CHECK);
if (IS_ERR(rq))
goto err_wedged;
i915_request_add(rq);
@@ -921,8 +1275,6 @@ err_client_lo:
err_client_hi:
preempt_client_fini(&hi);
err_unlock:
- if (igt_flush_test(i915, I915_WAIT_LOCKED))
- err = -EIO;
intel_runtime_pm_put(&i915->runtime_pm, wakeref);
mutex_unlock(&i915->drm.struct_mutex);
return err;
@@ -930,7 +1282,7 @@ err_unlock:
err_wedged:
igt_spinner_end(&hi.spin);
igt_spinner_end(&lo.spin);
- i915_gem_set_wedged(i915);
+ intel_gt_set_wedged(&i915->gt);
err = -EIO;
goto err_client_lo;
}
@@ -954,10 +1306,10 @@ static int live_preempt_hang(void *arg)
mutex_lock(&i915->drm.struct_mutex);
wakeref = intel_runtime_pm_get(&i915->runtime_pm);
- if (igt_spinner_init(&spin_hi, i915))
+ if (igt_spinner_init(&spin_hi, &i915->gt))
goto err_unlock;
- if (igt_spinner_init(&spin_lo, i915))
+ if (igt_spinner_init(&spin_lo, &i915->gt))
goto err_spin_hi;
ctx_hi = kernel_context(i915);
@@ -978,8 +1330,8 @@ static int live_preempt_hang(void *arg)
if (!intel_engine_has_preemption(engine))
continue;
- rq = igt_spinner_create_request(&spin_lo, ctx_lo, engine,
- MI_ARB_CHECK);
+ rq = spinner_create_request(&spin_lo, ctx_lo, engine,
+ MI_ARB_CHECK);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto err_ctx_lo;
@@ -989,13 +1341,13 @@ static int live_preempt_hang(void *arg)
if (!igt_wait_for_spinner(&spin_lo, rq)) {
GEM_TRACE("lo spinner failed to start\n");
GEM_TRACE_DUMP();
- i915_gem_set_wedged(i915);
+ intel_gt_set_wedged(&i915->gt);
err = -EIO;
goto err_ctx_lo;
}
- rq = igt_spinner_create_request(&spin_hi, ctx_hi, engine,
- MI_ARB_CHECK);
+ rq = spinner_create_request(&spin_hi, ctx_hi, engine,
+ MI_ARB_CHECK);
if (IS_ERR(rq)) {
igt_spinner_end(&spin_lo);
err = PTR_ERR(rq);
@@ -1011,21 +1363,21 @@ static int live_preempt_hang(void *arg)
HZ / 10)) {
pr_err("Preemption did not occur within timeout!");
GEM_TRACE_DUMP();
- i915_gem_set_wedged(i915);
+ intel_gt_set_wedged(&i915->gt);
err = -EIO;
goto err_ctx_lo;
}
- set_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags);
- i915_reset_engine(engine, NULL);
- clear_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags);
+ set_bit(I915_RESET_ENGINE + id, &i915->gt.reset.flags);
+ intel_engine_reset(engine, NULL);
+ clear_bit(I915_RESET_ENGINE + id, &i915->gt.reset.flags);
engine->execlists.preempt_hang.inject_hang = false;
if (!igt_wait_for_spinner(&spin_hi, rq)) {
GEM_TRACE("hi spinner failed to start\n");
GEM_TRACE_DUMP();
- i915_gem_set_wedged(i915);
+ intel_gt_set_wedged(&i915->gt);
err = -EIO;
goto err_ctx_lo;
}
@@ -1048,7 +1400,6 @@ err_spin_lo:
err_spin_hi:
igt_spinner_fini(&spin_hi);
err_unlock:
- igt_flush_test(i915, I915_WAIT_LOCKED);
intel_runtime_pm_put(&i915->runtime_pm, wakeref);
mutex_unlock(&i915->drm.struct_mutex);
return err;
@@ -1108,11 +1459,13 @@ static int smoke_submit(struct preempt_smoke *smoke,
if (vma) {
i915_vma_lock(vma);
- err = rq->engine->emit_bb_start(rq,
- vma->node.start,
- PAGE_SIZE, 0);
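+ /*
+ * Declare the batch dependency before emitting the jump into it:
+ * await the object, mark the vma active, then emit_bb_start.
+ */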
+ err = i915_request_await_object(rq, vma->obj, false);
if (!err)
err = i915_vma_move_to_active(vma, rq, 0);
+ if (!err)
+ err = rq->engine->emit_bb_start(rq,
+ vma->node.start,
+ PAGE_SIZE, 0);
i915_vma_unlock(vma);
}
@@ -1406,7 +1759,7 @@ static int nop_virtual_engine(struct drm_i915_private *i915,
request[nc]->fence.context,
request[nc]->fence.seqno);
GEM_TRACE_DUMP();
- i915_gem_set_wedged(i915);
+ intel_gt_set_wedged(&i915->gt);
break;
}
}
@@ -1444,6 +1797,7 @@ static int live_virtual_engine(void *arg)
struct drm_i915_private *i915 = arg;
struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1];
struct intel_engine_cs *engine;
+ struct intel_gt *gt = &i915->gt;
enum intel_engine_id id;
unsigned int class, inst;
int err = -ENODEV;
@@ -1467,10 +1821,10 @@ static int live_virtual_engine(void *arg)
nsibling = 0;
for (inst = 0; inst <= MAX_ENGINE_INSTANCE; inst++) {
- if (!i915->engine_class[class][inst])
+ if (!gt->engine_class[class][inst])
continue;
- siblings[nsibling++] = i915->engine_class[class][inst];
+ siblings[nsibling++] = gt->engine_class[class][inst];
}
if (nsibling < 2)
continue;
@@ -1553,7 +1907,7 @@ static int mask_virtual_engine(struct drm_i915_private *i915,
request[n]->fence.context,
request[n]->fence.seqno);
GEM_TRACE_DUMP();
- i915_gem_set_wedged(i915);
+ intel_gt_set_wedged(&i915->gt);
err = -EIO;
goto out;
}
@@ -1591,6 +1945,7 @@ static int live_virtual_mask(void *arg)
{
struct drm_i915_private *i915 = arg;
struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1];
+ struct intel_gt *gt = &i915->gt;
unsigned int class, inst;
int err = 0;
@@ -1604,10 +1959,10 @@ static int live_virtual_mask(void *arg)
nsibling = 0;
for (inst = 0; inst <= MAX_ENGINE_INSTANCE; inst++) {
- if (!i915->engine_class[class][inst])
+ if (!gt->engine_class[class][inst])
break;
- siblings[nsibling++] = i915->engine_class[class][inst];
+ siblings[nsibling++] = gt->engine_class[class][inst];
}
if (nsibling < 2)
continue;
@@ -1768,6 +2123,7 @@ static int live_virtual_bond(void *arg)
};
struct drm_i915_private *i915 = arg;
struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1];
+ struct intel_gt *gt = &i915->gt;
unsigned int class, inst;
int err = 0;
@@ -1782,11 +2138,11 @@ static int live_virtual_bond(void *arg)
nsibling = 0;
for (inst = 0; inst <= MAX_ENGINE_INSTANCE; inst++) {
- if (!i915->engine_class[class][inst])
+ if (!gt->engine_class[class][inst])
break;
GEM_BUG_ON(nsibling == ARRAY_SIZE(siblings));
- siblings[nsibling++] = i915->engine_class[class][inst];
+ siblings[nsibling++] = gt->engine_class[class][inst];
}
if (nsibling < 2)
continue;
@@ -1812,9 +2168,11 @@ int intel_execlists_live_selftests(struct drm_i915_private *i915)
{
static const struct i915_subtest tests[] = {
SUBTEST(live_sanitycheck),
+ SUBTEST(live_timeslice_preempt),
SUBTEST(live_busywait_preempt),
SUBTEST(live_preempt),
SUBTEST(live_late_preempt),
+ SUBTEST(live_nopreempt),
SUBTEST(live_suppress_self_preempt),
SUBTEST(live_suppress_wait_preempt),
SUBTEST(live_chain_preempt),
@@ -1828,8 +2186,8 @@ int intel_execlists_live_selftests(struct drm_i915_private *i915)
if (!HAS_EXECLISTS(i915))
return 0;
- if (i915_terminally_wedged(i915))
+ if (intel_gt_is_wedged(&i915->gt))
return 0;
- return i915_subtests(tests, i915);
+ return i915_live_subtests(tests, i915);
}
diff --git a/drivers/gpu/drm/i915/gt/selftest_reset.c b/drivers/gpu/drm/i915/gt/selftest_reset.c
index b5c590c9ccba..00a4f60cdfd5 100644
--- a/drivers/gpu/drm/i915/gt/selftest_reset.c
+++ b/drivers/gpu/drm/i915/gt/selftest_reset.c
@@ -9,26 +9,29 @@
static int igt_global_reset(void *arg)
{
- struct drm_i915_private *i915 = arg;
+ struct intel_gt *gt = arg;
unsigned int reset_count;
+ intel_wakeref_t wakeref;
int err = 0;
/* Check that we can issue a global GPU reset */
- igt_global_reset_lock(i915);
+ igt_global_reset_lock(gt);
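+ /* A reset pokes hardware registers; keep the device awake for it. */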
+ wakeref = intel_runtime_pm_get(&gt->i915->runtime_pm);
- reset_count = i915_reset_count(&i915->gpu_error);
+ reset_count = i915_reset_count(&gt->i915->gpu_error);
- i915_reset(i915, ALL_ENGINES, NULL);
+ intel_gt_reset(gt, ALL_ENGINES, NULL);
- if (i915_reset_count(&i915->gpu_error) == reset_count) {
+ if (i915_reset_count(&gt->i915->gpu_error) == reset_count) {
pr_err("No GPU reset recorded!\n");
err = -EINVAL;
}
- igt_global_reset_unlock(i915);
+ intel_runtime_pm_put(&gt->i915->runtime_pm, wakeref);
+ igt_global_reset_unlock(gt);
- if (i915_reset_failed(i915))
+ if (intel_gt_is_wedged(gt))
err = -EIO;
return err;
@@ -36,64 +39,123 @@ static int igt_global_reset(void *arg)
static int igt_wedged_reset(void *arg)
{
- struct drm_i915_private *i915 = arg;
+ struct intel_gt *gt = arg;
intel_wakeref_t wakeref;
/* Check that we can recover a wedged device with a GPU reset */
- igt_global_reset_lock(i915);
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
+ igt_global_reset_lock(gt);
+ wakeref = intel_runtime_pm_get(&gt->i915->runtime_pm);
- i915_gem_set_wedged(i915);
+ intel_gt_set_wedged(gt);
- GEM_BUG_ON(!i915_reset_failed(i915));
- i915_reset(i915, ALL_ENGINES, NULL);
+ GEM_BUG_ON(!intel_gt_is_wedged(gt));
+ intel_gt_reset(gt, ALL_ENGINES, NULL);
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
- igt_global_reset_unlock(i915);
+ intel_runtime_pm_put(&gt->i915->runtime_pm, wakeref);
+ igt_global_reset_unlock(gt);
- return i915_reset_failed(i915) ? -EIO : 0;
+ return intel_gt_is_wedged(gt) ? -EIO : 0;
}
static int igt_atomic_reset(void *arg)
{
- struct drm_i915_private *i915 = arg;
+ struct intel_gt *gt = arg;
const typeof(*igt_atomic_phases) *p;
int err = 0;
/* Check that the resets are usable from atomic context */
- igt_global_reset_lock(i915);
- mutex_lock(&i915->drm.struct_mutex);
+ intel_gt_pm_get(gt);
+ igt_global_reset_lock(gt);
/* Flush any requests before we get started and check basics */
- if (!igt_force_reset(i915))
+ if (!igt_force_reset(gt))
goto unlock;
for (p = igt_atomic_phases; p->name; p++) {
intel_engine_mask_t awake;
- GEM_TRACE("intel_gpu_reset under %s\n", p->name);
+ GEM_TRACE("__intel_gt_reset under %s\n", p->name);
- awake = reset_prepare(i915);
+ awake = reset_prepare(gt);
p->critical_section_begin();
- reset_prepare(i915);
- err = intel_gpu_reset(i915, ALL_ENGINES);
+
+ err = __intel_gt_reset(gt, ALL_ENGINES);
+
p->critical_section_end();
- reset_finish(i915, awake);
+ reset_finish(gt, awake);
if (err) {
- pr_err("intel_gpu_reset failed under %s\n", p->name);
+ pr_err("__intel_gt_reset failed under %s\n", p->name);
break;
}
}
/* As we poke around the guts, do a full reset before continuing. */
- igt_force_reset(i915);
+ igt_force_reset(gt);
unlock:
- mutex_unlock(&i915->drm.struct_mutex);
- igt_global_reset_unlock(i915);
+ igt_global_reset_unlock(gt);
+ intel_gt_pm_put(gt);
+
+ return err;
+}
+
+static int igt_atomic_engine_reset(void *arg)
+{
+ struct intel_gt *gt = arg;
+ const typeof(*igt_atomic_phases) *p;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ int err = 0;
+
+ /* Check that the resets are usable from atomic context */
+
+ if (!intel_has_reset_engine(gt->i915))
+ return 0;
+
+ if (USES_GUC_SUBMISSION(gt->i915))
+ return 0;
+
+ intel_gt_pm_get(gt);
+ igt_global_reset_lock(gt);
+
+ /* Flush any requests before we get started and check basics */
+ if (!igt_force_reset(gt))
+ goto out_unlock;
+
+ for_each_engine(engine, gt->i915, id) {
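+ /*
+ * Keep the engine awake and its submission tasklet quiesced so
+ * that the engine reset runs without a concurrent dequeue.
+ */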
+ tasklet_disable_nosync(&engine->execlists.tasklet);
+ intel_engine_pm_get(engine);
+
+ for (p = igt_atomic_phases; p->name; p++) {
+ GEM_TRACE("intel_engine_reset(%s) under %s\n",
+ engine->name, p->name);
+
+ p->critical_section_begin();
+ err = intel_engine_reset(engine, NULL);
+ p->critical_section_end();
+
+ if (err) {
+ pr_err("intel_engine_reset(%s) failed under %s\n",
+ engine->name, p->name);
+ break;
+ }
+ }
+
+ intel_engine_pm_put(engine);
+ tasklet_enable(&engine->execlists.tasklet);
+ if (err)
+ break;
+ }
+
+ /* As we poke around the guts, do a full reset before continuing. */
+ igt_force_reset(gt);
+
+out_unlock:
+ igt_global_reset_unlock(gt);
+ intel_gt_pm_put(gt);
return err;
}
@@ -104,18 +166,15 @@ int intel_reset_live_selftests(struct drm_i915_private *i915)
SUBTEST(igt_global_reset), /* attempt to recover GPU first */
SUBTEST(igt_wedged_reset),
SUBTEST(igt_atomic_reset),
+ SUBTEST(igt_atomic_engine_reset),
};
- intel_wakeref_t wakeref;
- int err = 0;
+ struct intel_gt *gt = &i915->gt;
- if (!intel_has_gpu_reset(i915))
+ if (!intel_has_gpu_reset(gt->i915))
return 0;
- if (i915_terminally_wedged(i915))
+ if (intel_gt_is_wedged(gt))
return -EIO; /* we're long past hope of a successful reset */
- with_intel_runtime_pm(&i915->runtime_pm, wakeref)
- err = i915_subtests(tests, i915);
-
- return err;
+ return intel_gt_live_subtests(tests, gt);
}
diff --git a/drivers/gpu/drm/i915/selftests/i915_timeline.c b/drivers/gpu/drm/i915/gt/selftest_timeline.c
index 76d3977f1d4b..321481403165 100644
--- a/drivers/gpu/drm/i915/selftests/i915_timeline.c
+++ b/drivers/gpu/drm/i915/gt/selftest_timeline.c
@@ -7,15 +7,16 @@
#include <linux/prime_numbers.h>
#include "gem/i915_gem_pm.h"
+#include "intel_gt.h"
-#include "i915_random.h"
-#include "i915_selftest.h"
+#include "../selftests/i915_random.h"
+#include "../i915_selftest.h"
-#include "igt_flush_test.h"
-#include "mock_gem_device.h"
-#include "mock_timeline.h"
+#include "../selftests/igt_flush_test.h"
+#include "../selftests/mock_gem_device.h"
+#include "selftests/mock_timeline.h"
-static struct page *hwsp_page(struct i915_timeline *tl)
+static struct page *hwsp_page(struct intel_timeline *tl)
{
struct drm_i915_gem_object *obj = tl->hwsp_ggtt->obj;
@@ -23,7 +24,7 @@ static struct page *hwsp_page(struct i915_timeline *tl)
return sg_page(obj->mm.pages->sgl);
}
-static unsigned long hwsp_cacheline(struct i915_timeline *tl)
+static unsigned long hwsp_cacheline(struct intel_timeline *tl)
{
unsigned long address = (unsigned long)page_address(hwsp_page(tl));
@@ -35,7 +36,7 @@ static unsigned long hwsp_cacheline(struct i915_timeline *tl)
struct mock_hwsp_freelist {
struct drm_i915_private *i915;
struct radix_tree_root cachelines;
- struct i915_timeline **history;
+ struct intel_timeline **history;
unsigned long count, max;
struct rnd_state prng;
};
@@ -46,12 +47,12 @@ enum {
static void __mock_hwsp_record(struct mock_hwsp_freelist *state,
unsigned int idx,
- struct i915_timeline *tl)
+ struct intel_timeline *tl)
{
tl = xchg(&state->history[idx], tl);
if (tl) {
radix_tree_delete(&state->cachelines, hwsp_cacheline(tl));
- i915_timeline_put(tl);
+ intel_timeline_put(tl);
}
}
@@ -59,14 +60,14 @@ static int __mock_hwsp_timeline(struct mock_hwsp_freelist *state,
unsigned int count,
unsigned int flags)
{
- struct i915_timeline *tl;
+ struct intel_timeline *tl;
unsigned int idx;
while (count--) {
unsigned long cacheline;
int err;
- tl = i915_timeline_create(state->i915, NULL);
+ tl = intel_timeline_create(&state->i915->gt, NULL);
if (IS_ERR(tl))
return PTR_ERR(tl);
@@ -77,7 +78,7 @@ static int __mock_hwsp_timeline(struct mock_hwsp_freelist *state,
pr_err("HWSP cacheline %lu already used; duplicate allocation!\n",
cacheline);
}
- i915_timeline_put(tl);
+ intel_timeline_put(tl);
return err;
}
@@ -162,21 +163,21 @@ struct __igt_sync {
bool set;
};
-static int __igt_sync(struct i915_timeline *tl,
+static int __igt_sync(struct intel_timeline *tl,
u64 ctx,
const struct __igt_sync *p,
const char *name)
{
int ret;
- if (__i915_timeline_sync_is_later(tl, ctx, p->seqno) != p->expected) {
+ if (__intel_timeline_sync_is_later(tl, ctx, p->seqno) != p->expected) {
pr_err("%s: %s(ctx=%llu, seqno=%u) expected passed %s but failed\n",
name, p->name, ctx, p->seqno, yesno(p->expected));
return -EINVAL;
}
if (p->set) {
- ret = __i915_timeline_sync_set(tl, ctx, p->seqno);
+ ret = __intel_timeline_sync_set(tl, ctx, p->seqno);
if (ret)
return ret;
}
@@ -204,7 +205,7 @@ static int igt_sync(void *arg)
{ "unwrap", UINT_MAX, true, false },
{},
}, *p;
- struct i915_timeline tl;
+ struct intel_timeline tl;
int order, offset;
int ret = -ENODEV;
@@ -248,7 +249,7 @@ static unsigned int random_engine(struct rnd_state *rnd)
static int bench_sync(void *arg)
{
struct rnd_state prng;
- struct i915_timeline tl;
+ struct intel_timeline tl;
unsigned long end_time, count;
u64 prng32_1M;
ktime_t kt;
@@ -286,7 +287,7 @@ static int bench_sync(void *arg)
do {
u64 id = i915_prandom_u64_state(&prng);
- __i915_timeline_sync_set(&tl, id, 0);
+ __intel_timeline_sync_set(&tl, id, 0);
count++;
} while (!time_after(jiffies, end_time));
kt = ktime_sub(ktime_get(), kt);
@@ -301,7 +302,7 @@ static int bench_sync(void *arg)
while (end_time--) {
u64 id = i915_prandom_u64_state(&prng);
- if (!__i915_timeline_sync_is_later(&tl, id, 0)) {
+ if (!__intel_timeline_sync_is_later(&tl, id, 0)) {
mock_timeline_fini(&tl);
pr_err("Lookup of %llu failed\n", id);
return -EINVAL;
@@ -322,7 +323,7 @@ static int bench_sync(void *arg)
kt = ktime_get();
end_time = jiffies + HZ/10;
do {
- __i915_timeline_sync_set(&tl, count++, 0);
+ __intel_timeline_sync_set(&tl, count++, 0);
} while (!time_after(jiffies, end_time));
kt = ktime_sub(ktime_get(), kt);
pr_info("%s: %lu in-order insertions, %lluns/insert\n",
@@ -332,7 +333,7 @@ static int bench_sync(void *arg)
end_time = count;
kt = ktime_get();
while (end_time--) {
- if (!__i915_timeline_sync_is_later(&tl, end_time, 0)) {
+ if (!__intel_timeline_sync_is_later(&tl, end_time, 0)) {
pr_err("Lookup of %lu failed\n", end_time);
mock_timeline_fini(&tl);
return -EINVAL;
@@ -356,8 +357,8 @@ static int bench_sync(void *arg)
u32 id = random_engine(&prng);
u32 seqno = prandom_u32_state(&prng);
- if (!__i915_timeline_sync_is_later(&tl, id, seqno))
- __i915_timeline_sync_set(&tl, id, seqno);
+ if (!__intel_timeline_sync_is_later(&tl, id, seqno))
+ __intel_timeline_sync_set(&tl, id, seqno);
count++;
} while (!time_after(jiffies, end_time));
@@ -385,8 +386,8 @@ static int bench_sync(void *arg)
*/
u64 id = (u64)(count & mask) << order;
- __i915_timeline_sync_is_later(&tl, id, 0);
- __i915_timeline_sync_set(&tl, id, 0);
+ __intel_timeline_sync_is_later(&tl, id, 0);
+ __intel_timeline_sync_set(&tl, id, 0);
count++;
} while (!time_after(jiffies, end_time));
@@ -401,7 +402,7 @@ static int bench_sync(void *arg)
return 0;
}
-int i915_timeline_mock_selftests(void)
+int intel_timeline_mock_selftests(void)
{
static const struct i915_subtest tests[] = {
SUBTEST(mock_hwsp_freelist),
@@ -443,14 +444,14 @@ static int emit_ggtt_store_dw(struct i915_request *rq, u32 addr, u32 value)
}
static struct i915_request *
-tl_write(struct i915_timeline *tl, struct intel_engine_cs *engine, u32 value)
+tl_write(struct intel_timeline *tl, struct intel_engine_cs *engine, u32 value)
{
struct i915_request *rq;
int err;
- lockdep_assert_held(&tl->i915->drm.struct_mutex); /* lazy rq refs */
+ lockdep_assert_held(&tl->gt->i915->drm.struct_mutex); /* lazy rq refs */
- err = i915_timeline_pin(tl);
+ err = intel_timeline_pin(tl);
if (err) {
rq = ERR_PTR(err);
goto out;
@@ -466,26 +467,26 @@ tl_write(struct i915_timeline *tl, struct intel_engine_cs *engine, u32 value)
rq = ERR_PTR(err);
out_unpin:
- i915_timeline_unpin(tl);
+ intel_timeline_unpin(tl);
out:
if (IS_ERR(rq))
pr_err("Failed to write to timeline!\n");
return rq;
}
-static struct i915_timeline *
-checked_i915_timeline_create(struct drm_i915_private *i915)
+static struct intel_timeline *
+checked_intel_timeline_create(struct drm_i915_private *i915)
{
- struct i915_timeline *tl;
+ struct intel_timeline *tl;
- tl = i915_timeline_create(i915, NULL);
+ tl = intel_timeline_create(&i915->gt, NULL);
if (IS_ERR(tl))
return tl;
if (*tl->hwsp_seqno != tl->seqno) {
pr_err("Timeline created with incorrect breadcrumb, found %x, expected %x\n",
*tl->hwsp_seqno, tl->seqno);
- i915_timeline_put(tl);
+ intel_timeline_put(tl);
return ERR_PTR(-EINVAL);
}
@@ -496,7 +497,7 @@ static int live_hwsp_engine(void *arg)
{
#define NUM_TIMELINES 4096
struct drm_i915_private *i915 = arg;
- struct i915_timeline **timelines;
+ struct intel_timeline **timelines;
struct intel_engine_cs *engine;
enum intel_engine_id id;
intel_wakeref_t wakeref;
@@ -523,10 +524,10 @@ static int live_hwsp_engine(void *arg)
continue;
for (n = 0; n < NUM_TIMELINES; n++) {
- struct i915_timeline *tl;
+ struct intel_timeline *tl;
struct i915_request *rq;
- tl = checked_i915_timeline_create(i915);
+ tl = checked_intel_timeline_create(i915);
if (IS_ERR(tl)) {
err = PTR_ERR(tl);
goto out;
@@ -534,7 +535,7 @@ static int live_hwsp_engine(void *arg)
rq = tl_write(tl, engine, count);
if (IS_ERR(rq)) {
- i915_timeline_put(tl);
+ intel_timeline_put(tl);
err = PTR_ERR(rq);
goto out;
}
@@ -548,14 +549,14 @@ out:
err = -EIO;
for (n = 0; n < count; n++) {
- struct i915_timeline *tl = timelines[n];
+ struct intel_timeline *tl = timelines[n];
if (!err && *tl->hwsp_seqno != n) {
pr_err("Invalid seqno stored in timeline %lu, found 0x%x\n",
n, *tl->hwsp_seqno);
err = -EINVAL;
}
- i915_timeline_put(tl);
+ intel_timeline_put(tl);
}
intel_runtime_pm_put(&i915->runtime_pm, wakeref);
@@ -571,7 +572,7 @@ static int live_hwsp_alternate(void *arg)
{
#define NUM_TIMELINES 4096
struct drm_i915_private *i915 = arg;
- struct i915_timeline **timelines;
+ struct intel_timeline **timelines;
struct intel_engine_cs *engine;
enum intel_engine_id id;
intel_wakeref_t wakeref;
@@ -596,13 +597,13 @@ static int live_hwsp_alternate(void *arg)
count = 0;
for (n = 0; n < NUM_TIMELINES; n++) {
for_each_engine(engine, i915, id) {
- struct i915_timeline *tl;
+ struct intel_timeline *tl;
struct i915_request *rq;
if (!intel_engine_can_store_dword(engine))
continue;
- tl = checked_i915_timeline_create(i915);
+ tl = checked_intel_timeline_create(i915);
if (IS_ERR(tl)) {
err = PTR_ERR(tl);
goto out;
@@ -610,7 +611,7 @@ static int live_hwsp_alternate(void *arg)
rq = tl_write(tl, engine, count);
if (IS_ERR(rq)) {
- i915_timeline_put(tl);
+ intel_timeline_put(tl);
err = PTR_ERR(rq);
goto out;
}
@@ -624,14 +625,14 @@ out:
err = -EIO;
for (n = 0; n < count; n++) {
- struct i915_timeline *tl = timelines[n];
+ struct intel_timeline *tl = timelines[n];
if (!err && *tl->hwsp_seqno != n) {
pr_err("Invalid seqno stored in timeline %lu, found 0x%x\n",
n, *tl->hwsp_seqno);
err = -EINVAL;
}
- i915_timeline_put(tl);
+ intel_timeline_put(tl);
}
intel_runtime_pm_put(&i915->runtime_pm, wakeref);
@@ -647,7 +648,7 @@ static int live_hwsp_wrap(void *arg)
{
struct drm_i915_private *i915 = arg;
struct intel_engine_cs *engine;
- struct i915_timeline *tl;
+ struct intel_timeline *tl;
enum intel_engine_id id;
intel_wakeref_t wakeref;
int err = 0;
@@ -660,7 +661,7 @@ static int live_hwsp_wrap(void *arg)
mutex_lock(&i915->drm.struct_mutex);
wakeref = intel_runtime_pm_get(&i915->runtime_pm);
- tl = i915_timeline_create(i915, NULL);
+ tl = intel_timeline_create(&i915->gt, NULL);
if (IS_ERR(tl)) {
err = PTR_ERR(tl);
goto out_rpm;
@@ -668,7 +669,7 @@ static int live_hwsp_wrap(void *arg)
if (!tl->has_initial_breadcrumb || !tl->hwsp_cacheline)
goto out_free;
- err = i915_timeline_pin(tl);
+ err = intel_timeline_pin(tl);
if (err)
goto out_free;
@@ -688,7 +689,9 @@ static int live_hwsp_wrap(void *arg)
tl->seqno = -4u;
- err = i915_timeline_get_seqno(tl, rq, &seqno[0]);
+ mutex_lock_nested(&tl->mutex, SINGLE_DEPTH_NESTING);
+ err = intel_timeline_get_seqno(tl, rq, &seqno[0]);
+ mutex_unlock(&tl->mutex);
if (err) {
i915_request_add(rq);
goto out;
@@ -703,7 +706,9 @@ static int live_hwsp_wrap(void *arg)
}
hwsp_seqno[0] = tl->hwsp_seqno;
- err = i915_timeline_get_seqno(tl, rq, &seqno[1]);
+ mutex_lock_nested(&tl->mutex, SINGLE_DEPTH_NESTING);
+ err = intel_timeline_get_seqno(tl, rq, &seqno[1]);
+ mutex_unlock(&tl->mutex);
if (err) {
i915_request_add(rq);
goto out;
@@ -745,9 +750,9 @@ out:
if (igt_flush_test(i915, I915_WAIT_LOCKED))
err = -EIO;
- i915_timeline_unpin(tl);
+ intel_timeline_unpin(tl);
out_free:
- i915_timeline_put(tl);
+ intel_timeline_put(tl);
out_rpm:
intel_runtime_pm_put(&i915->runtime_pm, wakeref);
mutex_unlock(&i915->drm.struct_mutex);
@@ -781,10 +786,10 @@ static int live_hwsp_recycle(void *arg)
continue;
do {
- struct i915_timeline *tl;
+ struct intel_timeline *tl;
struct i915_request *rq;
- tl = checked_i915_timeline_create(i915);
+ tl = checked_intel_timeline_create(i915);
if (IS_ERR(tl)) {
err = PTR_ERR(tl);
goto out;
@@ -792,14 +797,14 @@ static int live_hwsp_recycle(void *arg)
rq = tl_write(tl, engine, count);
if (IS_ERR(rq)) {
- i915_timeline_put(tl);
+ intel_timeline_put(tl);
err = PTR_ERR(rq);
goto out;
}
if (i915_request_wait(rq, 0, HZ / 5) < 0) {
pr_err("Wait for timeline writes timed out!\n");
- i915_timeline_put(tl);
+ intel_timeline_put(tl);
err = -EIO;
goto out;
}
@@ -810,26 +815,22 @@ static int live_hwsp_recycle(void *arg)
err = -EINVAL;
}
- i915_timeline_put(tl);
+ intel_timeline_put(tl);
count++;
if (err)
goto out;
-
- i915_timelines_park(i915); /* Encourage recycling! */
} while (!__igt_timeout(end_time, NULL));
}
out:
- if (igt_flush_test(i915, I915_WAIT_LOCKED))
- err = -EIO;
intel_runtime_pm_put(&i915->runtime_pm, wakeref);
mutex_unlock(&i915->drm.struct_mutex);
return err;
}
-int i915_timeline_live_selftests(struct drm_i915_private *i915)
+int intel_timeline_live_selftests(struct drm_i915_private *i915)
{
static const struct i915_subtest tests[] = {
SUBTEST(live_hwsp_recycle),
@@ -838,8 +839,8 @@ int i915_timeline_live_selftests(struct drm_i915_private *i915)
SUBTEST(live_hwsp_wrap),
};
- if (i915_terminally_wedged(i915))
+ if (intel_gt_is_wedged(&i915->gt))
return 0;
- return i915_subtests(tests, i915);
+ return i915_live_subtests(tests, i915);
}
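
A note on the wrap test above: live_hwsp_wrap primes tl->seqno to -4u so that two back-to-back seqno allocations straddle the 32-bit boundary. Ordering survives the wrap because seqno comparisons are done by signed distance. A minimal, self-contained sketch of that idiom (mirroring the i915_seqno_passed() style; the helper name here is illustrative):

	#include <stdint.h>
	#include <stdio.h>

	/*
	 * Wrap-safe seqno comparison: two u32 seqnos are ordered by signed
	 * distance, so a timeline that wraps from 0xfffffffc through 0
	 * still compares correctly.
	 */
	static int seqno_passed(uint32_t a, uint32_t b)
	{
		return (int32_t)(a - b) >= 0;
	}

	int main(void)
	{
		uint32_t before = -4u;       /* as in live_hwsp_wrap: tl->seqno = -4u */
		uint32_t after = before + 8; /* crosses the 32-bit boundary */

		printf("after passed before: %d\n", seqno_passed(after, before)); /* 1 */
		printf("before passed after: %d\n", seqno_passed(before, after)); /* 0 */
		return 0;
	}
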
diff --git a/drivers/gpu/drm/i915/gt/selftest_workarounds.c b/drivers/gpu/drm/i915/gt/selftest_workarounds.c
index 44becd9538be..d06d68ac2a3b 100644
--- a/drivers/gpu/drm/i915/gt/selftest_workarounds.c
+++ b/drivers/gpu/drm/i915/gt/selftest_workarounds.c
@@ -5,13 +5,14 @@
*/
#include "gem/i915_gem_pm.h"
+#include "gt/intel_engine_user.h"
+#include "gt/intel_gt.h"
#include "i915_selftest.h"
#include "intel_reset.h"
#include "selftests/igt_flush_test.h"
#include "selftests/igt_reset.h"
#include "selftests/igt_spinner.h"
-#include "selftests/igt_wedge_me.h"
#include "selftests/mock_drm.h"
#include "gem/selftests/igt_gem_utils.h"
@@ -24,11 +25,9 @@ static const struct wo_register {
{ INTEL_GEMINILAKE, 0x731c }
};
-#define REF_NAME_MAX (INTEL_ENGINE_CS_MAX_NAME + 8)
struct wa_lists {
struct i915_wa_list gt_wa_list;
struct {
- char name[REF_NAME_MAX];
struct i915_wa_list wa_list;
struct i915_wa_list ctx_wa_list;
} engine[I915_NUM_ENGINES];
@@ -42,25 +41,20 @@ reference_lists_init(struct drm_i915_private *i915, struct wa_lists *lists)
memset(lists, 0, sizeof(*lists));
- wa_init_start(&lists->gt_wa_list, "GT_REF");
+ wa_init_start(&lists->gt_wa_list, "GT_REF", "global");
gt_init_workarounds(i915, &lists->gt_wa_list);
wa_init_finish(&lists->gt_wa_list);
for_each_engine(engine, i915, id) {
struct i915_wa_list *wal = &lists->engine[id].wa_list;
- char *name = lists->engine[id].name;
- snprintf(name, REF_NAME_MAX, "%s_REF", engine->name);
-
- wa_init_start(wal, name);
+ wa_init_start(wal, "REF", engine->name);
engine_init_workarounds(engine, wal);
wa_init_finish(wal);
- snprintf(name, REF_NAME_MAX, "%s_CTX_REF", engine->name);
-
__intel_engine_init_ctx_wa(engine,
&lists->engine[id].ctx_wa_list,
- name);
+ "CTX_REF");
}
}
@@ -102,7 +96,7 @@ read_nonprivs(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
i915_gem_object_flush_map(result);
i915_gem_object_unpin_map(result);
- vma = i915_vma_instance(result, &engine->i915->ggtt.vm, NULL);
+ vma = i915_vma_instance(result, &engine->gt->ggtt->vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
goto err_obj;
@@ -119,7 +113,9 @@ read_nonprivs(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
}
i915_vma_lock(vma);
- err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
+ err = i915_request_await_object(rq, vma->obj, true);
+ if (err == 0)
+ err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
i915_vma_unlock(vma);
if (err)
goto err_req;
@@ -184,7 +180,7 @@ static int check_whitelist(struct i915_gem_context *ctx,
struct intel_engine_cs *engine)
{
struct drm_i915_gem_object *results;
- struct igt_wedge_me wedge;
+ struct intel_wedge_me wedge;
u32 *vaddr;
int err;
int i;
@@ -195,10 +191,10 @@ static int check_whitelist(struct i915_gem_context *ctx,
err = 0;
i915_gem_object_lock(results);
- igt_wedge_on_timeout(&wedge, ctx->i915, HZ / 5) /* a safety net! */
+ intel_wedge_on_timeout(&wedge, &ctx->i915->gt, HZ / 5) /* safety net! */
err = i915_gem_object_set_to_cpu_domain(results, false);
i915_gem_object_unlock(results);
- if (i915_terminally_wedged(ctx->i915))
+ if (intel_gt_is_wedged(&ctx->i915->gt))
err = -EIO;
if (err)
goto out_put;
@@ -231,13 +227,13 @@ out_put:
static int do_device_reset(struct intel_engine_cs *engine)
{
- i915_reset(engine->i915, engine->mask, "live_workarounds");
+ intel_gt_reset(engine->gt, engine->mask, "live_workarounds");
return 0;
}
static int do_engine_reset(struct intel_engine_cs *engine)
{
- return i915_reset_engine(engine, "live_workarounds");
+ return intel_engine_reset(engine, "live_workarounds");
}
static int
@@ -245,6 +241,7 @@ switch_to_scratch_context(struct intel_engine_cs *engine,
struct igt_spinner *spin)
{
struct i915_gem_context *ctx;
+ struct intel_context *ce;
struct i915_request *rq;
intel_wakeref_t wakeref;
int err = 0;
@@ -255,10 +252,14 @@ switch_to_scratch_context(struct intel_engine_cs *engine,
GEM_BUG_ON(i915_gem_context_is_bannable(ctx));
+ ce = i915_gem_context_get_engine(ctx, engine->legacy_idx);
+ GEM_BUG_ON(IS_ERR(ce));
+
rq = ERR_PTR(-ENODEV);
with_intel_runtime_pm(&engine->i915->runtime_pm, wakeref)
- rq = igt_spinner_create_request(spin, ctx, engine, MI_NOOP);
+ rq = igt_spinner_create_request(spin, ce, MI_NOOP);
+ intel_context_put(ce);
kernel_context_close(ctx);
if (IS_ERR(rq)) {
@@ -286,64 +287,67 @@ static int check_whitelist_across_reset(struct intel_engine_cs *engine,
const char *name)
{
struct drm_i915_private *i915 = engine->i915;
- struct i915_gem_context *ctx;
+ struct i915_gem_context *ctx, *tmp;
struct igt_spinner spin;
intel_wakeref_t wakeref;
int err;
- pr_info("Checking %d whitelisted registers (RING_NONPRIV) [%s]\n",
- engine->whitelist.count, name);
-
- err = igt_spinner_init(&spin, i915);
- if (err)
- return err;
+ pr_info("Checking %d whitelisted registers on %s (RING_NONPRIV) [%s]\n",
+ engine->whitelist.count, engine->name, name);
ctx = kernel_context(i915);
if (IS_ERR(ctx))
return PTR_ERR(ctx);
+ err = igt_spinner_init(&spin, engine->gt);
+ if (err)
+ goto out_ctx;
+
err = check_whitelist(ctx, engine);
if (err) {
pr_err("Invalid whitelist *before* %s reset!\n", name);
- goto out;
+ goto out_spin;
}
err = switch_to_scratch_context(engine, &spin);
if (err)
- goto out;
+ goto out_spin;
with_intel_runtime_pm(&i915->runtime_pm, wakeref)
err = reset(engine);
igt_spinner_end(&spin);
- igt_spinner_fini(&spin);
if (err) {
pr_err("%s reset failed\n", name);
- goto out;
+ goto out_spin;
}
err = check_whitelist(ctx, engine);
if (err) {
pr_err("Whitelist not preserved in context across %s reset!\n",
name);
- goto out;
+ goto out_spin;
}
+ tmp = kernel_context(i915);
+ if (IS_ERR(tmp)) {
+ err = PTR_ERR(tmp);
+ goto out_spin;
+ }
kernel_context_close(ctx);
-
- ctx = kernel_context(i915);
- if (IS_ERR(ctx))
- return PTR_ERR(ctx);
+ ctx = tmp;
err = check_whitelist(ctx, engine);
if (err) {
pr_err("Invalid whitelist *after* %s reset in fresh context!\n",
name);
- goto out;
+ goto out_spin;
}
-out:
+out_spin:
+ igt_spinner_fini(&spin);
+out_ctx:
kernel_context_close(ctx);
return err;
}
@@ -393,6 +397,10 @@ static bool wo_register(struct intel_engine_cs *engine, u32 reg)
enum intel_platform platform = INTEL_INFO(engine->i915)->platform;
int i;
+ if ((reg & RING_FORCE_TO_NONPRIV_ACCESS_MASK) ==
+ RING_FORCE_TO_NONPRIV_ACCESS_WR)
+ return true;
+
for (i = 0; i < ARRAY_SIZE(wo_registers); i++) {
if (wo_registers[i].platform == platform &&
wo_registers[i].reg == reg)
@@ -404,7 +412,8 @@ static bool wo_register(struct intel_engine_cs *engine, u32 reg)
static bool ro_register(u32 reg)
{
- if (reg & RING_FORCE_TO_NONPRIV_RD)
+ if ((reg & RING_FORCE_TO_NONPRIV_ACCESS_MASK) ==
+ RING_FORCE_TO_NONPRIV_ACCESS_RD)
return true;
return false;
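
The wo_register()/ro_register() change above moves from treating RD/WR as independent flag bits to decoding a multi-bit access field, so "read-only" is now an equality test against the whole field rather than a single-bit test. A hedged sketch of the idea; the field width and shift below are assumptions for illustration only, the real values live in the i915 register headers:

	#include <stdint.h>
	#include <stdio.h>

	/* Hypothetical encoding, for illustration only. */
	#define NONPRIV_ACCESS_SHIFT	28
	#define NONPRIV_ACCESS_MASK	(3u << NONPRIV_ACCESS_SHIFT)
	#define NONPRIV_ACCESS_RW	(0u << NONPRIV_ACCESS_SHIFT)
	#define NONPRIV_ACCESS_RD	(1u << NONPRIV_ACCESS_SHIFT)
	#define NONPRIV_ACCESS_WR	(2u << NONPRIV_ACCESS_SHIFT)

	static int is_read_only(uint32_t reg)
	{
		/* Compare the whole field, as the reworked ro_register() does. */
		return (reg & NONPRIV_ACCESS_MASK) == NONPRIV_ACCESS_RD;
	}

	int main(void)
	{
		uint32_t reg = 0x731c | NONPRIV_ACCESS_RD;

		printf("ro=%d offset=%#x\n", is_read_only(reg),
		       reg & ~NONPRIV_ACCESS_MASK);
		return 0;
	}
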
@@ -476,12 +485,12 @@ static int check_dirty_whitelist(struct i915_gem_context *ctx,
u32 srm, lrm, rsvd;
u32 expect;
int idx;
+ bool ro_reg;
if (wo_register(engine, reg))
continue;
- if (ro_register(reg))
- continue;
+ ro_reg = ro_register(reg);
srm = MI_STORE_REGISTER_MEM;
lrm = MI_LOAD_REGISTER_MEM;
@@ -542,7 +551,7 @@ static int check_dirty_whitelist(struct i915_gem_context *ctx,
i915_gem_object_flush_map(batch->obj);
i915_gem_object_unpin_map(batch->obj);
- i915_gem_chipset_flush(ctx->i915);
+ intel_gt_chipset_flush(engine->gt);
rq = igt_request_alloc(ctx, engine);
if (IS_ERR(rq)) {
@@ -570,7 +579,7 @@ err_request:
if (i915_request_wait(rq, 0, HZ / 5) < 0) {
pr_err("%s: Futzing %x timedout; cancelling test\n",
engine->name, reg);
- i915_gem_set_wedged(ctx->i915);
+ intel_gt_set_wedged(&ctx->i915->gt);
err = -EIO;
goto out_batch;
}
@@ -582,24 +591,35 @@ err_request:
}
GEM_BUG_ON(values[ARRAY_SIZE(values) - 1] != 0xffffffff);
- rsvd = results[ARRAY_SIZE(values)]; /* detect write masking */
- if (!rsvd) {
- pr_err("%s: Unable to write to whitelisted register %x\n",
- engine->name, reg);
- err = -EINVAL;
- goto out_unpin;
+ if (!ro_reg) {
+ /* detect write masking */
+ rsvd = results[ARRAY_SIZE(values)];
+ if (!rsvd) {
+ pr_err("%s: Unable to write to whitelisted register %x\n",
+ engine->name, reg);
+ err = -EINVAL;
+ goto out_unpin;
+ }
}
expect = results[0];
idx = 1;
for (v = 0; v < ARRAY_SIZE(values); v++) {
- expect = reg_write(expect, values[v], rsvd);
+ if (ro_reg)
+ expect = results[0];
+ else
+ expect = reg_write(expect, values[v], rsvd);
+
if (results[idx] != expect)
err++;
idx++;
}
for (v = 0; v < ARRAY_SIZE(values); v++) {
- expect = reg_write(expect, ~values[v], rsvd);
+ if (ro_reg)
+ expect = results[0];
+ else
+ expect = reg_write(expect, ~values[v], rsvd);
+
if (results[idx] != expect)
err++;
idx++;
@@ -608,15 +628,22 @@ err_request:
pr_err("%s: %d mismatch between values written to whitelisted register [%x], and values read back!\n",
engine->name, err, reg);
- pr_info("%s: Whitelisted register: %x, original value %08x, rsvd %08x\n",
- engine->name, reg, results[0], rsvd);
+ if (ro_reg)
+ pr_info("%s: Whitelisted read-only register: %x, original value %08x\n",
+ engine->name, reg, results[0]);
+ else
+ pr_info("%s: Whitelisted register: %x, original value %08x, rsvd %08x\n",
+ engine->name, reg, results[0], rsvd);
expect = results[0];
idx = 1;
for (v = 0; v < ARRAY_SIZE(values); v++) {
u32 w = values[v];
- expect = reg_write(expect, w, rsvd);
+ if (ro_reg)
+ expect = results[0];
+ else
+ expect = reg_write(expect, w, rsvd);
pr_info("Wrote %08x, read %08x, expect %08x\n",
w, results[idx], expect);
idx++;
@@ -624,7 +651,10 @@ err_request:
for (v = 0; v < ARRAY_SIZE(values); v++) {
u32 w = ~values[v];
- expect = reg_write(expect, w, rsvd);
+ if (ro_reg)
+ expect = results[0];
+ else
+ expect = reg_write(expect, w, rsvd);
pr_info("Wrote %08x, read %08x, expect %08x\n",
w, results[idx], expect);
idx++;
@@ -707,7 +737,7 @@ static int live_reset_whitelist(void *arg)
if (!engine || engine->whitelist.count == 0)
return 0;
- igt_global_reset_lock(i915);
+ igt_global_reset_lock(&i915->gt);
if (intel_has_reset_engine(i915)) {
err = check_whitelist_across_reset(engine,
@@ -726,7 +756,7 @@ static int live_reset_whitelist(void *arg)
}
out:
- igt_global_reset_unlock(i915);
+ igt_global_reset_unlock(&i915->gt);
return err;
}
@@ -756,8 +786,8 @@ static int read_whitelisted_registers(struct i915_gem_context *ctx,
u64 offset = results->node.start + sizeof(u32) * i;
u32 reg = i915_mmio_reg_offset(engine->whitelist.list[i].reg);
- /* Clear RD only and WR only flags */
- reg &= ~(RING_FORCE_TO_NONPRIV_RD | RING_FORCE_TO_NONPRIV_WR);
+ /* Clear access permission field */
+ reg &= ~RING_FORCE_TO_NONPRIV_ACCESS_MASK;
*cs++ = srm;
*cs++ = reg;
@@ -806,7 +836,7 @@ static int scrub_whitelisted_registers(struct i915_gem_context *ctx,
*cs++ = MI_BATCH_BUFFER_END;
i915_gem_object_flush_map(batch->obj);
- i915_gem_chipset_flush(ctx->i915);
+ intel_gt_chipset_flush(engine->gt);
rq = igt_request_alloc(ctx, engine);
if (IS_ERR(rq)) {
@@ -927,7 +957,8 @@ check_whitelisted_registers(struct intel_engine_cs *engine,
for (i = 0; i < engine->whitelist.count; i++) {
const struct i915_wa *wa = &engine->whitelist.list[i];
- if (i915_mmio_reg_offset(wa->reg) & RING_FORCE_TO_NONPRIV_RD)
+ if (i915_mmio_reg_offset(wa->reg) &
+ RING_FORCE_TO_NONPRIV_ACCESS_RD)
continue;
if (!fn(engine, a[i], b[i], wa->reg))
@@ -1060,7 +1091,7 @@ verify_wa_lists(struct i915_gem_context *ctx, struct wa_lists *lists,
ok &= wa_list_verify(&i915->uncore, &lists->gt_wa_list, str);
- for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
+ for_each_gem_engine(ce, i915_gem_context_engines(ctx), it) {
enum intel_engine_id id = ce->engine->id;
ok &= engine_wa_list_verify(ce,
@@ -1071,7 +1102,6 @@ verify_wa_lists(struct i915_gem_context *ctx, struct wa_lists *lists,
&lists->engine[id].ctx_wa_list,
str) == 0;
}
- i915_gem_context_unlock_engines(ctx);
return ok;
}
@@ -1092,9 +1122,11 @@ live_gpu_reset_workarounds(void *arg)
if (IS_ERR(ctx))
return PTR_ERR(ctx);
+ i915_gem_context_lock_engines(ctx);
+
pr_info("Verifying after GPU reset...\n");
- igt_global_reset_lock(i915);
+ igt_global_reset_lock(&i915->gt);
wakeref = intel_runtime_pm_get(&i915->runtime_pm);
reference_lists_init(i915, &lists);
@@ -1103,15 +1135,16 @@ live_gpu_reset_workarounds(void *arg)
if (!ok)
goto out;
- i915_reset(i915, ALL_ENGINES, "live_workarounds");
+ intel_gt_reset(&i915->gt, ALL_ENGINES, "live_workarounds");
ok = verify_wa_lists(ctx, &lists, "after reset");
out:
+ i915_gem_context_unlock_engines(ctx);
kernel_context_close(ctx);
reference_lists_fini(i915, &lists);
intel_runtime_pm_put(&i915->runtime_pm, wakeref);
- igt_global_reset_unlock(i915);
+ igt_global_reset_unlock(&i915->gt);
return ok ? 0 : -ESRCH;
}
@@ -1120,10 +1153,10 @@ static int
live_engine_reset_workarounds(void *arg)
{
struct drm_i915_private *i915 = arg;
- struct intel_engine_cs *engine;
+ struct i915_gem_engines_iter it;
struct i915_gem_context *ctx;
+ struct intel_context *ce;
struct igt_spinner spin;
- enum intel_engine_id id;
struct i915_request *rq;
intel_wakeref_t wakeref;
struct wa_lists lists;
@@ -1136,12 +1169,13 @@ live_engine_reset_workarounds(void *arg)
if (IS_ERR(ctx))
return PTR_ERR(ctx);
- igt_global_reset_lock(i915);
+ igt_global_reset_lock(&i915->gt);
wakeref = intel_runtime_pm_get(&i915->runtime_pm);
reference_lists_init(i915, &lists);
- for_each_engine(engine, i915, id) {
+ for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
+ struct intel_engine_cs *engine = ce->engine;
bool ok;
pr_info("Verifying after %s reset...\n", engine->name);
@@ -1152,7 +1186,7 @@ live_engine_reset_workarounds(void *arg)
goto err;
}
- i915_reset_engine(engine, "live_workarounds");
+ intel_engine_reset(engine, "live_workarounds");
ok = verify_wa_lists(ctx, &lists, "after idle reset");
if (!ok) {
@@ -1160,11 +1194,11 @@ live_engine_reset_workarounds(void *arg)
goto err;
}
- ret = igt_spinner_init(&spin, i915);
+ ret = igt_spinner_init(&spin, engine->gt);
if (ret)
goto err;
- rq = igt_spinner_create_request(&spin, ctx, engine, MI_NOOP);
+ rq = igt_spinner_create_request(&spin, ce, MI_NOOP);
if (IS_ERR(rq)) {
ret = PTR_ERR(rq);
igt_spinner_fini(&spin);
@@ -1180,7 +1214,7 @@ live_engine_reset_workarounds(void *arg)
goto err;
}
- i915_reset_engine(engine, "live_workarounds");
+ intel_engine_reset(engine, "live_workarounds");
igt_spinner_end(&spin);
igt_spinner_fini(&spin);
@@ -1191,11 +1225,11 @@ live_engine_reset_workarounds(void *arg)
goto err;
}
}
-
err:
+ i915_gem_context_unlock_engines(ctx);
reference_lists_fini(i915, &lists);
intel_runtime_pm_put(&i915->runtime_pm, wakeref);
- igt_global_reset_unlock(i915);
+ igt_global_reset_unlock(&i915->gt);
kernel_context_close(ctx);
igt_flush_test(i915, I915_WAIT_LOCKED);
@@ -1214,7 +1248,7 @@ int intel_workarounds_live_selftests(struct drm_i915_private *i915)
};
int err;
- if (i915_terminally_wedged(i915))
+ if (intel_gt_is_wedged(&i915->gt))
return 0;
mutex_lock(&i915->drm.struct_mutex);
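
For context on the rsvd handling in check_dirty_whitelist() above: the last value written to each whitelisted register is all-ones, and whatever reads back is taken as the mask of bits that actually stick; read-only registers now skip this probe and simply expect the original value back. A simplified, runnable model of the expected-value computation (the kernel's reg_write() also special-cases masked-write registers, which this sketch omits):

	#include <stdint.h>
	#include <stdio.h>

	/* Bits set in rsvd are writable; the rest keep their old value. */
	static uint32_t reg_write(uint32_t old, uint32_t new, uint32_t rsvd)
	{
		return (old & ~rsvd) | (new & rsvd);
	}

	int main(void)
	{
		uint32_t rsvd = 0x00ff00ff;	/* pretend only these bits stick */
		uint32_t expect = 0;

		expect = reg_write(expect, 0xdeadbeef, rsvd);
		printf("expect=%#x\n", expect);	/* 0xad00ef */
		return 0;
	}
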
diff --git a/drivers/gpu/drm/i915/selftests/mock_timeline.c b/drivers/gpu/drm/i915/gt/selftests/mock_timeline.c
index 65b52be23d42..598170efcaf6 100644
--- a/drivers/gpu/drm/i915/selftests/mock_timeline.c
+++ b/drivers/gpu/drm/i915/gt/selftests/mock_timeline.c
@@ -4,18 +4,18 @@
* Copyright © 2017-2018 Intel Corporation
*/
-#include "../i915_timeline.h"
+#include "../intel_timeline.h"
#include "mock_timeline.h"
-void mock_timeline_init(struct i915_timeline *timeline, u64 context)
+void mock_timeline_init(struct intel_timeline *timeline, u64 context)
{
- timeline->i915 = NULL;
+ timeline->gt = NULL;
timeline->fence_context = context;
mutex_init(&timeline->mutex);
- INIT_ACTIVE_REQUEST(&timeline->last_request);
+ INIT_ACTIVE_REQUEST(&timeline->last_request, &timeline->mutex);
INIT_LIST_HEAD(&timeline->requests);
i915_syncmap_init(&timeline->sync);
@@ -23,7 +23,7 @@ void mock_timeline_init(struct i915_timeline *timeline, u64 context)
INIT_LIST_HEAD(&timeline->link);
}
-void mock_timeline_fini(struct i915_timeline *timeline)
+void mock_timeline_fini(struct intel_timeline *timeline)
{
i915_syncmap_free(&timeline->sync);
}
diff --git a/drivers/gpu/drm/i915/selftests/mock_timeline.h b/drivers/gpu/drm/i915/gt/selftests/mock_timeline.h
index b6deaa61110d..689efc66c908 100644
--- a/drivers/gpu/drm/i915/selftests/mock_timeline.h
+++ b/drivers/gpu/drm/i915/gt/selftests/mock_timeline.h
@@ -7,9 +7,9 @@
#ifndef __MOCK_TIMELINE__
#define __MOCK_TIMELINE__
-struct i915_timeline;
+struct intel_timeline;
-void mock_timeline_init(struct i915_timeline *timeline, u64 context);
-void mock_timeline_fini(struct i915_timeline *timeline);
+void mock_timeline_init(struct intel_timeline *timeline, u64 context);
+void mock_timeline_fini(struct intel_timeline *timeline);
#endif /* !__MOCK_TIMELINE__ */
diff --git a/drivers/gpu/drm/i915/gt/uc/Makefile b/drivers/gpu/drm/i915/gt/uc/Makefile
new file mode 100644
index 000000000000..bec94d434cb6
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/uc/Makefile
@@ -0,0 +1,5 @@
+# For building individual subdir files on the command line
+subdir-ccflags-y += -I$(srctree)/$(src)/../..
+
+# Extra header tests
+header-test-pattern-$(CONFIG_DRM_I915_WERROR) := *.h
diff --git a/drivers/gpu/drm/i915/intel_guc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc.c
index c40a6efdd33a..249c747e9756 100644
--- a/drivers/gpu/drm/i915/intel_guc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.c
@@ -1,27 +1,9 @@
+// SPDX-License-Identifier: MIT
/*
- * Copyright © 2014-2017 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
+ * Copyright © 2014-2019 Intel Corporation
*/
+#include "gt/intel_gt.h"
#include "intel_guc.h"
#include "intel_guc_ads.h"
#include "intel_guc_submission.h"
@@ -29,16 +11,16 @@
static void gen8_guc_raise_irq(struct intel_guc *guc)
{
- struct drm_i915_private *dev_priv = guc_to_i915(guc);
+ struct intel_gt *gt = guc_to_gt(guc);
- I915_WRITE(GUC_SEND_INTERRUPT, GUC_SEND_TRIGGER);
+ intel_uncore_write(gt->uncore, GUC_SEND_INTERRUPT, GUC_SEND_TRIGGER);
}
static void gen11_guc_raise_irq(struct intel_guc *guc)
{
- struct drm_i915_private *dev_priv = guc_to_i915(guc);
+ struct intel_gt *gt = guc_to_gt(guc);
- I915_WRITE(GEN11_GUC_HOST_INTERRUPT, 0);
+ intel_uncore_write(gt->uncore, GEN11_GUC_HOST_INTERRUPT, 0);
}
static inline i915_reg_t guc_send_reg(struct intel_guc *guc, u32 i)
@@ -52,11 +34,11 @@ static inline i915_reg_t guc_send_reg(struct intel_guc *guc, u32 i)
void intel_guc_init_send_regs(struct intel_guc *guc)
{
- struct drm_i915_private *dev_priv = guc_to_i915(guc);
+ struct intel_gt *gt = guc_to_gt(guc);
enum forcewake_domains fw_domains = 0;
unsigned int i;
- if (INTEL_GEN(dev_priv) >= 11) {
+ if (INTEL_GEN(gt->i915) >= 11) {
guc->send_regs.base =
i915_mmio_reg_offset(GEN11_SOFT_SCRATCH(0));
guc->send_regs.count = GEN11_SOFT_SCRATCH_COUNT;
@@ -67,7 +49,7 @@ void intel_guc_init_send_regs(struct intel_guc *guc)
}
for (i = 0; i < guc->send_regs.count; i++) {
- fw_domains |= intel_uncore_forcewake_for_reg(&dev_priv->uncore,
+ fw_domains |= intel_uncore_forcewake_for_reg(gt->uncore,
guc_send_reg(guc, i),
FW_REG_READ | FW_REG_WRITE);
}
@@ -76,11 +58,12 @@ void intel_guc_init_send_regs(struct intel_guc *guc)
void intel_guc_init_early(struct intel_guc *guc)
{
- struct drm_i915_private *i915 = guc_to_i915(guc);
+ struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
intel_guc_fw_init_early(guc);
intel_guc_ct_init_early(&guc->ct);
intel_guc_log_init_early(&guc->log);
+ intel_guc_submission_init_early(guc);
mutex_init(&guc->send_mutex);
spin_lock_init(&guc->irq_lock);
@@ -99,90 +82,6 @@ void intel_guc_init_early(struct intel_guc *guc)
}
}
-static int guc_init_wq(struct intel_guc *guc)
-{
- struct drm_i915_private *dev_priv = guc_to_i915(guc);
-
- /*
- * GuC log buffer flush work item has to do register access to
- * send the ack to GuC and this work item, if not synced before
- * suspend, can potentially get executed after the GFX device is
- * suspended.
- * By marking the WQ as freezable, we don't have to bother with
- * flushing this work item from the suspend hooks; the pending
- * work item, if any, will either be executed before the suspend
- * or scheduled later on resume. This way the handling of the work
- * item can be kept the same between system suspend & rpm suspend.
- */
- guc->log.relay.flush_wq =
- alloc_ordered_workqueue("i915-guc_log",
- WQ_HIGHPRI | WQ_FREEZABLE);
- if (!guc->log.relay.flush_wq) {
- DRM_ERROR("Couldn't allocate workqueue for GuC log\n");
- return -ENOMEM;
- }
-
- /*
- * Even though both sending GuC action, and adding a new workitem to
- * GuC workqueue are serialized (each with its own locking), since
- * we're using multiple engines, it's possible that we're going to
- * issue a preempt request with two (or more - each for different
- * engine) workitems in GuC queue. In this situation, GuC may submit
- * all of them, which will make us very confused.
- * Our preemption contexts may even already be complete - before we
- * even had the chance to send the preempt action to GuC! Rather
- * than introducing yet another lock, we can just use ordered workqueue
- * to make sure we're always sending a single preemption request with a
- * single workitem.
- */
- if (HAS_LOGICAL_RING_PREEMPTION(dev_priv) &&
- USES_GUC_SUBMISSION(dev_priv)) {
- guc->preempt_wq = alloc_ordered_workqueue("i915-guc_preempt",
- WQ_HIGHPRI);
- if (!guc->preempt_wq) {
- destroy_workqueue(guc->log.relay.flush_wq);
- DRM_ERROR("Couldn't allocate workqueue for GuC "
- "preemption\n");
- return -ENOMEM;
- }
- }
-
- return 0;
-}
-
-static void guc_fini_wq(struct intel_guc *guc)
-{
- struct workqueue_struct *wq;
-
- wq = fetch_and_zero(&guc->preempt_wq);
- if (wq)
- destroy_workqueue(wq);
-
- wq = fetch_and_zero(&guc->log.relay.flush_wq);
- if (wq)
- destroy_workqueue(wq);
-}
-
-int intel_guc_init_misc(struct intel_guc *guc)
-{
- struct drm_i915_private *i915 = guc_to_i915(guc);
- int ret;
-
- ret = guc_init_wq(guc);
- if (ret)
- return ret;
-
- intel_uc_fw_fetch(i915, &guc->fw);
-
- return 0;
-}
-
-void intel_guc_fini_misc(struct intel_guc *guc)
-{
- intel_uc_fw_cleanup_fetch(&guc->fw);
- guc_fini_wq(guc);
-}
-
static int guc_shared_data_create(struct intel_guc *guc)
{
struct i915_vma *vma;
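
The removed guc_init_wq() above is worth a gloss: its comments explain that the log-flush queue is WQ_FREEZABLE so pending work is parked across suspend instead of being flushed from the suspend hooks, and that the preemption queue is *ordered* so at most one preemption request is ever in flight. A minimal sketch of that allocation pattern, using the stock workqueue API (names are illustrative):

	#include <linux/errno.h>
	#include <linux/workqueue.h>

	static struct workqueue_struct *example_wq;

	static int example_wq_init(void)
	{
		/*
		 * Ordered: one in-flight workitem at a time.
		 * Freezable: pending work is deferred across suspend
		 * rather than flushed explicitly.
		 */
		example_wq = alloc_ordered_workqueue("example-log",
						     WQ_HIGHPRI | WQ_FREEZABLE);
		return example_wq ? 0 : -ENOMEM;
	}

	static void example_wq_fini(void)
	{
		destroy_workqueue(example_wq);
	}
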
@@ -209,66 +108,6 @@ static void guc_shared_data_destroy(struct intel_guc *guc)
i915_vma_unpin_and_release(&guc->shared_data, I915_VMA_RELEASE_MAP);
}
-int intel_guc_init(struct intel_guc *guc)
-{
- struct drm_i915_private *dev_priv = guc_to_i915(guc);
- int ret;
-
- ret = intel_uc_fw_init(&guc->fw);
- if (ret)
- goto err_fetch;
-
- ret = guc_shared_data_create(guc);
- if (ret)
- goto err_fw;
- GEM_BUG_ON(!guc->shared_data);
-
- ret = intel_guc_log_create(&guc->log);
- if (ret)
- goto err_shared;
-
- ret = intel_guc_ads_create(guc);
- if (ret)
- goto err_log;
- GEM_BUG_ON(!guc->ads_vma);
-
- ret = intel_guc_ct_init(&guc->ct);
- if (ret)
- goto err_ads;
-
- /* We need to notify the guc whenever we change the GGTT */
- i915_ggtt_enable_guc(dev_priv);
-
- return 0;
-
-err_ads:
- intel_guc_ads_destroy(guc);
-err_log:
- intel_guc_log_destroy(&guc->log);
-err_shared:
- guc_shared_data_destroy(guc);
-err_fw:
- intel_uc_fw_fini(&guc->fw);
-err_fetch:
- intel_uc_fw_cleanup_fetch(&guc->fw);
- return ret;
-}
-
-void intel_guc_fini(struct intel_guc *guc)
-{
- struct drm_i915_private *dev_priv = guc_to_i915(guc);
-
- i915_ggtt_disable_guc(dev_priv);
-
- intel_guc_ct_fini(&guc->ct);
-
- intel_guc_ads_destroy(guc);
- intel_guc_log_destroy(&guc->log);
- guc_shared_data_destroy(guc);
- intel_uc_fw_fini(&guc->fw);
- intel_uc_fw_cleanup_fetch(&guc->fw);
-}
-
static u32 guc_ctl_debug_flags(struct intel_guc *guc)
{
u32 level = intel_guc_log_get_level(&guc->log);
@@ -287,7 +126,7 @@ static u32 guc_ctl_feature_flags(struct intel_guc *guc)
{
u32 flags = 0;
- if (!USES_GUC_SUBMISSION(guc_to_i915(guc)))
+ if (!intel_guc_is_submission_supported(guc))
flags |= GUC_CTL_DISABLE_SCHEDULER;
return flags;
@@ -297,7 +136,7 @@ static u32 guc_ctl_ctxinfo_flags(struct intel_guc *guc)
{
u32 flags = 0;
- if (USES_GUC_SUBMISSION(guc_to_i915(guc))) {
+ if (intel_guc_is_submission_supported(guc)) {
u32 ctxnum, base;
base = intel_guc_ggtt_offset(guc, guc->stage_desc_pool);
@@ -364,13 +203,12 @@ static u32 guc_ctl_ads_flags(struct intel_guc *guc)
* transfer. These parameters are read by the firmware on startup
* and cannot be changed thereafter.
*/
-void intel_guc_init_params(struct intel_guc *guc)
+static void guc_init_params(struct intel_guc *guc)
{
- struct drm_i915_private *dev_priv = guc_to_i915(guc);
- u32 params[GUC_CTL_MAX_DWORDS];
+ u32 *params = guc->params;
int i;
- memset(params, 0, sizeof(params));
+ BUILD_BUG_ON(sizeof(guc->params) != GUC_CTL_MAX_DWORDS * sizeof(u32));
params[GUC_CTL_CTXINFO] = guc_ctl_ctxinfo_flags(guc);
params[GUC_CTL_LOG_PARAMS] = guc_ctl_log_params_flags(guc);
@@ -380,20 +218,113 @@ void intel_guc_init_params(struct intel_guc *guc)
for (i = 0; i < GUC_CTL_MAX_DWORDS; i++)
DRM_DEBUG_DRIVER("param[%2d] = %#x\n", i, params[i]);
+}
+
+/*
+ * Write the GuC parameter block out to the SOFT_SCRATCH registers
+ * before starting the firmware transfer. These parameters are read
+ * by the firmware on startup and cannot be changed thereafter.
+ */
+void intel_guc_write_params(struct intel_guc *guc)
+{
+ struct intel_uncore *uncore = guc_to_gt(guc)->uncore;
+ int i;
/*
* All SOFT_SCRATCH registers are in FORCEWAKE_BLITTER domain and
* they are power context saved so it's ok to release forcewake
* when we are done here and take it again at xfer time.
*/
- intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_BLITTER);
+ intel_uncore_forcewake_get(uncore, FORCEWAKE_BLITTER);
- I915_WRITE(SOFT_SCRATCH(0), 0);
+ intel_uncore_write(uncore, SOFT_SCRATCH(0), 0);
for (i = 0; i < GUC_CTL_MAX_DWORDS; i++)
- I915_WRITE(SOFT_SCRATCH(1 + i), params[i]);
+ intel_uncore_write(uncore, SOFT_SCRATCH(1 + i), guc->params[i]);
+
+ intel_uncore_forcewake_put(uncore, FORCEWAKE_BLITTER);
+}
+
+int intel_guc_init(struct intel_guc *guc)
+{
+ struct intel_gt *gt = guc_to_gt(guc);
+ int ret;
+
+ ret = intel_uc_fw_init(&guc->fw);
+ if (ret)
+ goto err_fetch;
+
+ ret = guc_shared_data_create(guc);
+ if (ret)
+ goto err_fw;
+ GEM_BUG_ON(!guc->shared_data);
+
+ ret = intel_guc_log_create(&guc->log);
+ if (ret)
+ goto err_shared;
+
+ ret = intel_guc_ads_create(guc);
+ if (ret)
+ goto err_log;
+ GEM_BUG_ON(!guc->ads_vma);
+
+ ret = intel_guc_ct_init(&guc->ct);
+ if (ret)
+ goto err_ads;
+
+ if (intel_guc_is_submission_supported(guc)) {
+ /*
+ * This is stuff we need to have available at fw load time
+ * if we are planning to enable submission later
+ */
+ ret = intel_guc_submission_init(guc);
+ if (ret)
+ goto err_ct;
+ }
+
+ /* now that everything is perma-pinned, initialize the parameters */
+ guc_init_params(guc);
+
+ /* We need to notify the guc whenever we change the GGTT */
+ i915_ggtt_enable_guc(gt->ggtt);
- intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_BLITTER);
+ return 0;
+
+err_ct:
+ intel_guc_ct_fini(&guc->ct);
+err_ads:
+ intel_guc_ads_destroy(guc);
+err_log:
+ intel_guc_log_destroy(&guc->log);
+err_shared:
+ guc_shared_data_destroy(guc);
+err_fw:
+ intel_uc_fw_fini(&guc->fw);
+err_fetch:
+ intel_uc_fw_cleanup_fetch(&guc->fw);
+ DRM_DEV_DEBUG_DRIVER(gt->i915->drm.dev, "failed with %d\n", ret);
+ return ret;
+}
+
+void intel_guc_fini(struct intel_guc *guc)
+{
+ struct intel_gt *gt = guc_to_gt(guc);
+
+ if (!intel_uc_fw_is_available(&guc->fw))
+ return;
+
+ i915_ggtt_disable_guc(gt->ggtt);
+
+ if (intel_guc_is_submission_supported(guc))
+ intel_guc_submission_fini(guc);
+
+ intel_guc_ct_fini(&guc->ct);
+
+ intel_guc_ads_destroy(guc);
+ intel_guc_log_destroy(&guc->log);
+ guc_shared_data_destroy(guc);
+ intel_uc_fw_fini(&guc->fw);
+ intel_uc_fw_cleanup_fetch(&guc->fw);
}
int intel_guc_send_nop(struct intel_guc *guc, const u32 *action, u32 len,
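
intel_guc_write_params() above brackets all of the SOFT_SCRATCH writes in a single explicit forcewake get/put of FORCEWAKE_BLITTER, since those registers share one forcewake domain and are power-context saved. A sketch of that batching pattern, reusing the uncore helpers from the diff (not a drop-in function; it assumes the i915-internal types and register names shown above):

	/* One forcewake reference for the whole batch of MMIO writes. */
	static void write_param_block(struct intel_uncore *uncore,
				      const u32 *params, int count)
	{
		int i;

		intel_uncore_forcewake_get(uncore, FORCEWAKE_BLITTER);

		intel_uncore_write(uncore, SOFT_SCRATCH(0), 0);
		for (i = 0; i < count; i++)
			intel_uncore_write(uncore, SOFT_SCRATCH(1 + i), params[i]);

		intel_uncore_forcewake_put(uncore, FORCEWAKE_BLITTER);
	}
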
@@ -414,8 +345,7 @@ void intel_guc_to_host_event_handler_nop(struct intel_guc *guc)
int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len,
u32 *response_buf, u32 response_buf_size)
{
- struct drm_i915_private *dev_priv = guc_to_i915(guc);
- struct intel_uncore *uncore = &dev_priv->uncore;
+ struct intel_uncore *uncore = guc_to_gt(guc)->uncore;
u32 status;
int i;
int ret;
@@ -464,7 +394,8 @@ int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len,
int count = min(response_buf_size, guc->send_regs.count - 1);
for (i = 0; i < count; i++)
- response_buf[i] = I915_READ(guc_send_reg(guc, i + 1));
+ response_buf[i] = intel_uncore_read(uncore,
+ guc_send_reg(guc, i + 1));
}
/* Use data from the GuC response as our return value */
@@ -497,7 +428,7 @@ int intel_guc_to_host_process_recv_msg(struct intel_guc *guc,
int intel_guc_sample_forcewake(struct intel_guc *guc)
{
- struct drm_i915_private *dev_priv = guc_to_i915(guc);
+ struct drm_i915_private *dev_priv = guc_to_gt(guc)->i915;
u32 action[2];
action[0] = INTEL_GUC_ACTION_SAMPLE_FORCEWAKE;
@@ -538,7 +469,7 @@ int intel_guc_auth_huc(struct intel_guc *guc, u32 rsa_offset)
*/
int intel_guc_suspend(struct intel_guc *guc)
{
- struct drm_i915_private *dev_priv = guc_to_i915(guc);
+ struct intel_uncore *uncore = guc_to_gt(guc)->uncore;
int ret;
u32 status;
u32 action[] = {
@@ -556,13 +487,14 @@ int intel_guc_suspend(struct intel_guc *guc)
* in progress so we need to take care of that ourselves as well.
*/
- I915_WRITE(SOFT_SCRATCH(14), INTEL_GUC_SLEEP_STATE_INVALID_MASK);
+ intel_uncore_write(uncore, SOFT_SCRATCH(14),
+ INTEL_GUC_SLEEP_STATE_INVALID_MASK);
ret = intel_guc_send(guc, action, ARRAY_SIZE(action));
if (ret)
return ret;
- ret = __intel_wait_for_register(&dev_priv->uncore, SOFT_SCRATCH(14),
+ ret = __intel_wait_for_register(uncore, SOFT_SCRATCH(14),
INTEL_GUC_SLEEP_STATE_INVALID_MASK,
0, 0, 10, &status);
if (ret)
@@ -658,17 +590,17 @@ int intel_guc_resume(struct intel_guc *guc)
*/
struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size)
{
- struct drm_i915_private *dev_priv = guc_to_i915(guc);
+ struct intel_gt *gt = guc_to_gt(guc);
struct drm_i915_gem_object *obj;
struct i915_vma *vma;
u64 flags;
int ret;
- obj = i915_gem_object_create_shmem(dev_priv, size);
+ obj = i915_gem_object_create_shmem(gt->i915, size);
if (IS_ERR(obj))
return ERR_CAST(obj);
- vma = i915_vma_instance(obj, &dev_priv->ggtt.vm, NULL);
+ vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
if (IS_ERR(vma))
goto err;
@@ -679,7 +611,7 @@ struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size)
goto err;
}
- return vma;
+ return i915_vma_make_unshrinkable(vma);
err:
i915_gem_object_put(obj);
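
intel_guc_allocate_vma() now returns the VMA via i915_vma_make_unshrinkable(), so GuC-facing buffers cannot be swapped out underneath the firmware by the shrinker. A hedged usage sketch (the error-handling shape follows callers such as guc_shared_data_create(); names here are illustrative and i915 headers are assumed):

	static int example_create_buffer(struct intel_guc *guc,
					 struct i915_vma **out)
	{
		struct i915_vma *vma;

		/* Comes back pinned in the GGTT and marked unshrinkable. */
		vma = intel_guc_allocate_vma(guc, PAGE_SIZE);
		if (IS_ERR(vma))
			return PTR_ERR(vma);

		*out = vma;
		return 0;
	}
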
diff --git a/drivers/gpu/drm/i915/intel_guc.h b/drivers/gpu/drm/i915/gt/uc/intel_guc.h
index 08c906abdfa2..2b2f046d3cc3 100644
--- a/drivers/gpu/drm/i915/intel_guc.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.h
@@ -1,25 +1,6 @@
+/* SPDX-License-Identifier: MIT */
/*
- * Copyright © 2014-2017 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
+ * Copyright © 2014-2019 Intel Corporation
*/
#ifndef _INTEL_GUC_H_
@@ -35,10 +16,7 @@
#include "i915_utils.h"
#include "i915_vma.h"
-struct guc_preempt_work {
- struct work_struct work;
- struct intel_engine_cs *engine;
-};
+struct __guc_ads_blob;
/*
* Top level structure of GuC. It handles firmware loading and manages client
@@ -50,21 +28,22 @@ struct intel_guc {
struct intel_guc_log log;
struct intel_guc_ct ct;
- /* Log snapshot if GuC errors during load */
- struct drm_i915_gem_object *load_err_log;
-
/* intel_guc_recv interrupt related state */
spinlock_t irq_lock;
unsigned int msg_enabled_mask;
struct {
bool enabled;
- void (*reset)(struct drm_i915_private *i915);
- void (*enable)(struct drm_i915_private *i915);
- void (*disable)(struct drm_i915_private *i915);
+ void (*reset)(struct intel_guc *guc);
+ void (*enable)(struct intel_guc *guc);
+ void (*disable)(struct intel_guc *guc);
} interrupts;
+ bool submission_supported;
+
struct i915_vma *ads_vma;
+ struct __guc_ads_blob *ads_blob;
+
struct i915_vma *stage_desc_pool;
void *stage_desc_pool_vaddr;
struct ida stage_ids;
@@ -72,15 +51,14 @@ struct intel_guc {
void *shared_data_vaddr;
struct intel_guc_client *execbuf_client;
- struct intel_guc_client *preempt_client;
-
- struct guc_preempt_work preempt_work[I915_NUM_ENGINES];
- struct workqueue_struct *preempt_wq;
DECLARE_BITMAP(doorbell_bitmap, GUC_NUM_DOORBELLS);
/* Cyclic counter mod pagesize */
u32 db_cacheline;
+ /* Control params for fw initialization */
+ u32 params[GUC_CTL_MAX_DWORDS];
+
/* GuC's FW specific registers used in MMIO send */
struct {
u32 base;
@@ -88,6 +66,9 @@ struct intel_guc {
enum forcewake_domains fw_domains;
} send_regs;
+ /* Store msg (e.g. log flush) that we see while CTBs are disabled */
+ u32 mmio_msg;
+
/* To serialize the intel_guc_send actions */
struct mutex send_mutex;
@@ -154,11 +135,9 @@ static inline u32 intel_guc_ggtt_offset(struct intel_guc *guc,
void intel_guc_init_early(struct intel_guc *guc);
void intel_guc_init_send_regs(struct intel_guc *guc);
-void intel_guc_init_params(struct intel_guc *guc);
-int intel_guc_init_misc(struct intel_guc *guc);
+void intel_guc_write_params(struct intel_guc *guc);
int intel_guc_init(struct intel_guc *guc);
void intel_guc_fini(struct intel_guc *guc);
-void intel_guc_fini_misc(struct intel_guc *guc);
int intel_guc_send_nop(struct intel_guc *guc, const u32 *action, u32 len,
u32 *response_buf, u32 response_buf_size);
int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len,
@@ -173,17 +152,34 @@ int intel_guc_suspend(struct intel_guc *guc);
int intel_guc_resume(struct intel_guc *guc);
struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size);
-static inline bool intel_guc_is_loaded(struct intel_guc *guc)
+static inline bool intel_guc_is_supported(struct intel_guc *guc)
{
- return intel_uc_fw_is_loaded(&guc->fw);
+ return intel_uc_fw_is_supported(&guc->fw);
+}
+
+static inline bool intel_guc_is_enabled(struct intel_guc *guc)
+{
+ return intel_uc_fw_is_enabled(&guc->fw);
+}
+
+static inline bool intel_guc_is_running(struct intel_guc *guc)
+{
+ return intel_uc_fw_is_running(&guc->fw);
}
static inline int intel_guc_sanitize(struct intel_guc *guc)
{
intel_uc_fw_sanitize(&guc->fw);
+ guc->mmio_msg = 0;
+
return 0;
}
+static inline bool intel_guc_is_submission_supported(struct intel_guc *guc)
+{
+ return guc->submission_supported;
+}
+
static inline void intel_guc_enable_msg(struct intel_guc *guc, u32 mask)
{
spin_lock_irq(&guc->irq_lock);
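
The new inline predicates above replace the single intel_guc_is_loaded() with a small ladder over the uc firmware state. Roughly: "supported" is a platform/firmware-table property, "enabled" means the firmware was selected for use, and "running" means the ucode actually loaded and booted; that reading is inferred from the helper names, not spelled out in the patch. A sketch of how a caller might gate work on them:

	static int example_guc_action(struct intel_guc *guc)
	{
		if (!intel_guc_is_supported(guc))
			return -ENODEV;	/* no GuC on this platform/fw table */

		if (!intel_guc_is_running(guc))
			return -EIO;	/* ucode not loaded and booted */

		/* safe to talk to the firmware here */
		return 0;
	}
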
diff --git a/drivers/gpu/drm/i915/intel_guc_ads.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c
index ecb69fc94218..ca6674b8e00c 100644
--- a/drivers/gpu/drm/i915/intel_guc_ads.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c
@@ -1,27 +1,9 @@
+// SPDX-License-Identifier: MIT
/*
- * Copyright © 2014-2017 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
+ * Copyright © 2014-2019 Intel Corporation
*/
+#include "gt/intel_gt.h"
#include "intel_guc_ads.h"
#include "intel_uc.h"
#include "i915_drv.h"
@@ -83,18 +65,14 @@ struct __guc_ads_blob {
u8 reg_state_buffer[GUC_S3_SAVE_SPACE_PAGES * PAGE_SIZE];
} __packed;
-static int __guc_ads_init(struct intel_guc *guc)
+static void __guc_ads_init(struct intel_guc *guc)
{
- struct drm_i915_private *dev_priv = guc_to_i915(guc);
- struct __guc_ads_blob *blob;
+ struct drm_i915_private *dev_priv = guc_to_gt(guc)->i915;
+ struct __guc_ads_blob *blob = guc->ads_blob;
const u32 skipped_size = LRC_PPHWSP_SZ * PAGE_SIZE + LR_HW_CONTEXT_SIZE;
u32 base;
u8 engine_class;
- blob = i915_gem_object_pin_map(guc->ads_vma->obj, I915_MAP_WB);
- if (IS_ERR(blob))
- return PTR_ERR(blob);
-
/* GuC scheduling policies */
guc_policies_init(&blob->policies);
@@ -144,9 +122,7 @@ static int __guc_ads_init(struct intel_guc *guc)
blob->ads.gt_system_info = base + ptr_offset(blob, system_info);
blob->ads.clients_info = base + ptr_offset(blob, clients_info);
- i915_gem_object_unpin_map(guc->ads_vma->obj);
-
- return 0;
+ i915_gem_object_flush_map(guc->ads_vma->obj);
}
/**
@@ -160,6 +136,7 @@ int intel_guc_ads_create(struct intel_guc *guc)
{
const u32 size = PAGE_ALIGN(sizeof(struct __guc_ads_blob));
struct i915_vma *vma;
+ void *blob;
int ret;
GEM_BUG_ON(guc->ads_vma);
@@ -168,11 +145,16 @@ int intel_guc_ads_create(struct intel_guc *guc)
if (IS_ERR(vma))
return PTR_ERR(vma);
+ blob = i915_gem_object_pin_map(vma->obj, I915_MAP_WB);
+ if (IS_ERR(blob)) {
+ ret = PTR_ERR(blob);
+ goto err_vma;
+ }
+
guc->ads_vma = vma;
+ guc->ads_blob = blob;
- ret = __guc_ads_init(guc);
- if (ret)
- goto err_vma;
+ __guc_ads_init(guc);
return 0;
@@ -183,7 +165,7 @@ err_vma:
void intel_guc_ads_destroy(struct intel_guc *guc)
{
- i915_vma_unpin_and_release(&guc->ads_vma, 0);
+ i915_vma_unpin_and_release(&guc->ads_vma, I915_VMA_RELEASE_MAP);
}
/**
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.h
new file mode 100644
index 000000000000..b00d3ae1113a
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2014-2019 Intel Corporation
+ */
+
+#ifndef _INTEL_GUC_ADS_H_
+#define _INTEL_GUC_ADS_H_
+
+struct intel_guc;
+
+int intel_guc_ads_create(struct intel_guc *guc);
+void intel_guc_ads_destroy(struct intel_guc *guc);
+void intel_guc_ads_reset(struct intel_guc *guc);
+
+#endif
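
The ADS rework above switches to a keep-mapped blob: the CPU map is pinned once at create time, __guc_ads_init() just rewrites the blob and flushes the map, and the map is dropped together with the VMA via I915_VMA_RELEASE_MAP. A sketch of that lifecycle using the same calls (function names are illustrative):

	static int example_blob_create(struct intel_guc *guc)
	{
		void *blob;

		blob = i915_gem_object_pin_map(guc->ads_vma->obj, I915_MAP_WB);
		if (IS_ERR(blob))
			return PTR_ERR(blob);

		guc->ads_blob = blob;	/* stays mapped for the blob's lifetime */
		return 0;
	}

	static void example_blob_reinit(struct intel_guc *guc)
	{
		/* CPU writes to guc->ads_blob ... then make them visible: */
		i915_gem_object_flush_map(guc->ads_vma->obj);
	}

	static void example_blob_destroy(struct intel_guc *guc)
	{
		/* Drops the CPU map and the VMA together. */
		i915_vma_unpin_and_release(&guc->ads_vma, I915_VMA_RELEASE_MAP);
	}
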
diff --git a/drivers/gpu/drm/i915/intel_guc_ct.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c
index 3921809f812b..b49115517510 100644
--- a/drivers/gpu/drm/i915/intel_guc_ct.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c
@@ -1,24 +1,6 @@
+// SPDX-License-Identifier: MIT
/*
- * Copyright © 2016-2017 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
+ * Copyright © 2016-2019 Intel Corporation
*/
#include "i915_drv.h"
@@ -529,8 +511,8 @@ unlink:
/*
* Command Transport (CT) buffer based GuC send function.
*/
-static int intel_guc_send_ct(struct intel_guc *guc, const u32 *action, u32 len,
- u32 *response_buf, u32 response_buf_size)
+int intel_guc_send_ct(struct intel_guc *guc, const u32 *action, u32 len,
+ u32 *response_buf, u32 response_buf_size)
{
struct intel_guc_ct *ct = &guc->ct;
struct intel_guc_ct_channel *ctch = &ct->host_channel;
@@ -834,7 +816,7 @@ static void ct_process_host_channel(struct intel_guc_ct *ct)
* When we're communicating with the GuC over CT, GuC uses events
* to notify us about new messages being posted on the RECV buffer.
*/
-static void intel_guc_to_host_event_handler_ct(struct intel_guc *guc)
+void intel_guc_to_host_event_handler_ct(struct intel_guc *guc)
{
struct intel_guc_ct *ct = &guc->ct;
@@ -892,20 +874,11 @@ int intel_guc_ct_enable(struct intel_guc_ct *ct)
{
struct intel_guc *guc = ct_to_guc(ct);
struct intel_guc_ct_channel *ctch = &ct->host_channel;
- int err;
if (ctch->enabled)
return 0;
- err = ctch_enable(guc, ctch);
- if (unlikely(err))
- return err;
-
- /* Switch into cmd transport buffer based send() */
- guc->send = intel_guc_send_ct;
- guc->handler = intel_guc_to_host_event_handler_ct;
- DRM_INFO("CT: %s\n", enableddisabled(true));
- return 0;
+ return ctch_enable(guc, ctch);
}
/**
@@ -921,9 +894,4 @@ void intel_guc_ct_disable(struct intel_guc_ct *ct)
return;
ctch_disable(guc, ctch);
-
- /* Disable send */
- guc->send = intel_guc_send_nop;
- guc->handler = intel_guc_to_host_event_handler_nop;
- DRM_INFO("CT: %s\n", enableddisabled(false));
}
diff --git a/drivers/gpu/drm/i915/intel_guc_ct.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.h
index 41ba593a4df7..7c24d83f5c24 100644
--- a/drivers/gpu/drm/i915/intel_guc_ct.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.h
@@ -1,34 +1,19 @@
+/* SPDX-License-Identifier: MIT */
/*
- * Copyright © 2016-2017 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
+ * Copyright © 2016-2019 Intel Corporation
*/
#ifndef _INTEL_GUC_CT_H_
#define _INTEL_GUC_CT_H_
-struct intel_guc;
-struct i915_vma;
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
#include "intel_guc_fwif.h"
+struct i915_vma;
+struct intel_guc;
+
/**
* DOC: Command Transport (CT).
*
@@ -101,4 +86,8 @@ static inline void intel_guc_ct_stop(struct intel_guc_ct *ct)
ct->host_channel.enabled = false;
}
+int intel_guc_send_ct(struct intel_guc *guc, const u32 *action, u32 len,
+ u32 *response_buf, u32 response_buf_size);
+void intel_guc_to_host_event_handler_ct(struct intel_guc *guc);
+
#endif /* _INTEL_GUC_CT_H_ */
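
With this change ct_enable/ct_disable no longer flip guc->send and guc->handler themselves; intel_guc_send_ct() and the CT event handler are exported instead, so the selection presumably moves up into the uc layer (not shown in this hunk). A hypothetical caller-side selection, for illustration only:

	static int example_guc_send(struct intel_guc *guc, const u32 *action,
				    u32 len)
	{
		/* Prefer the CT buffer path once the channel is up. */
		if (guc->ct.host_channel.enabled)
			return intel_guc_send_ct(guc, action, len, NULL, 0);

		/* Fall back to the MMIO path otherwise. */
		return intel_guc_send_mmio(guc, action, len, NULL, 0);
	}
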
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c
new file mode 100644
index 000000000000..5528224448f6
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c
@@ -0,0 +1,166 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2014-2019 Intel Corporation
+ *
+ * Authors:
+ * Vinit Azad <vinit.azad@intel.com>
+ * Ben Widawsky <ben@bwidawsk.net>
+ * Dave Gordon <david.s.gordon@intel.com>
+ * Alex Dai <yu.dai@intel.com>
+ */
+
+#include "gt/intel_gt.h"
+#include "intel_guc_fw.h"
+#include "i915_drv.h"
+
+/**
+ * intel_guc_fw_init_early() - initializes GuC firmware struct
+ * @guc: intel_guc struct
+ *
+ * On platforms with a GuC, this selects the firmware for uploading.
+ */
+void intel_guc_fw_init_early(struct intel_guc *guc)
+{
+ struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
+
+ intel_uc_fw_init_early(&guc->fw, INTEL_UC_FW_TYPE_GUC, HAS_GT_UC(i915),
+ INTEL_INFO(i915)->platform, INTEL_REVID(i915));
+}
+
+static void guc_prepare_xfer(struct intel_uncore *uncore)
+{
+ u32 shim_flags = GUC_DISABLE_SRAM_INIT_TO_ZEROES |
+ GUC_ENABLE_READ_CACHE_LOGIC |
+ GUC_ENABLE_MIA_CACHING |
+ GUC_ENABLE_READ_CACHE_FOR_SRAM_DATA |
+ GUC_ENABLE_READ_CACHE_FOR_WOPCM_DATA |
+ GUC_ENABLE_MIA_CLOCK_GATING;
+
+ /* Must program this register before loading the ucode with DMA */
+ intel_uncore_write(uncore, GUC_SHIM_CONTROL, shim_flags);
+
+ if (IS_GEN9_LP(uncore->i915))
+ intel_uncore_write(uncore, GEN9LP_GT_PM_CONFIG, GT_DOORBELL_ENABLE);
+ else
+ intel_uncore_write(uncore, GEN9_GT_PM_CONFIG, GT_DOORBELL_ENABLE);
+
+ if (IS_GEN(uncore->i915, 9)) {
+ /* DOP Clock Gating Enable for GuC clocks */
+ intel_uncore_rmw(uncore, GEN7_MISCCPCTL,
+ 0, GEN8_DOP_CLOCK_GATE_GUC_ENABLE);
+
+ /* allows for 5us (in 10ns units) before GT can go to RC6 */
+ intel_uncore_write(uncore, GUC_ARAT_C6DIS, 0x1FF);
+ }
+}
+
+/* Copy RSA signature from the fw image to HW for verification */
+static void guc_xfer_rsa(struct intel_uc_fw *guc_fw,
+ struct intel_uncore *uncore)
+{
+ u32 rsa[UOS_RSA_SCRATCH_COUNT];
+ size_t copied;
+ int i;
+
+ copied = intel_uc_fw_copy_rsa(guc_fw, rsa, sizeof(rsa));
+ GEM_BUG_ON(copied < sizeof(rsa));
+
+ for (i = 0; i < UOS_RSA_SCRATCH_COUNT; i++)
+ intel_uncore_write(uncore, UOS_RSA_SCRATCH(i), rsa[i]);
+}
+
+/*
+ * Read the GuC status register (GUC_STATUS) and store it in the
+ * specified location; then return a boolean indicating whether
+ * the value matches either of two values representing completion
+ * of the GuC boot process.
+ *
+ * This is used for polling the GuC status in a wait_for()
+ * loop below.
+ */
+static inline bool guc_ready(struct intel_uncore *uncore, u32 *status)
+{
+ u32 val = intel_uncore_read(uncore, GUC_STATUS);
+ u32 uk_val = val & GS_UKERNEL_MASK;
+
+ *status = val;
+ return (uk_val == GS_UKERNEL_READY) ||
+ ((val & GS_MIA_CORE_STATE) && (uk_val == GS_UKERNEL_LAPIC_DONE));
+}
+
+static int guc_wait_ucode(struct intel_uncore *uncore)
+{
+ u32 status;
+ int ret;
+
+ /*
+ * Wait for the GuC to start up.
+ * NB: Docs recommend not using the interrupt for completion.
+ * Measurements indicate this should take no more than 20ms, so a
+ * timeout here indicates that the GuC has failed and is unusable.
+ * (Higher levels of the driver may decide to reset the GuC and
+ * attempt the ucode load again if this happens.)
+ */
+ ret = wait_for(guc_ready(uncore, &status), 100);
+ DRM_DEBUG_DRIVER("GuC status %#x\n", status);
+
+ if ((status & GS_BOOTROM_MASK) == GS_BOOTROM_RSA_FAILED) {
+ DRM_ERROR("GuC firmware signature verification failed\n");
+ ret = -ENOEXEC;
+ }
+
+ if ((status & GS_UKERNEL_MASK) == GS_UKERNEL_EXCEPTION) {
+ DRM_ERROR("GuC firmware exception. EIP: %#x\n",
+ intel_uncore_read(uncore, SOFT_SCRATCH(13)));
+ ret = -ENXIO;
+ }
+
+ return ret;
+}
+
+/**
+ * intel_guc_fw_upload() - load GuC uCode to device
+ * @guc: intel_guc structure
+ *
+ * Called from intel_uc_init_hw() during driver load, resume from sleep and
+ * after a GPU reset.
+ *
+ * The firmware image should have already been fetched into memory, so only
+ * check that fetch succeeded, and then transfer the image to the h/w.
+ *
+ * Return: non-zero code on error
+ */
+int intel_guc_fw_upload(struct intel_guc *guc)
+{
+ struct intel_gt *gt = guc_to_gt(guc);
+ struct intel_uncore *uncore = gt->uncore;
+ int ret;
+
+ guc_prepare_xfer(uncore);
+
+ /*
+ * Note that GuC needs the CSS header plus uKernel code to be copied
+ * by the DMA engine in one operation, whereas the RSA signature is
+ * loaded via MMIO.
+ */
+ guc_xfer_rsa(&guc->fw, uncore);
+
+ /*
+ * Current uCode expects the code to be loaded at 8k; locations below
+ * this are used for the stack.
+ */
+ ret = intel_uc_fw_upload(&guc->fw, gt, 0x2000, UOS_MOVE);
+ if (ret)
+ goto out;
+
+ ret = guc_wait_ucode(uncore);
+ if (ret)
+ goto out;
+
+ intel_uc_fw_change_status(&guc->fw, INTEL_UC_FIRMWARE_RUNNING);
+ return 0;
+
+out:
+ intel_uc_fw_change_status(&guc->fw, INTEL_UC_FIRMWARE_FAIL);
+ return ret;
+}
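
guc_wait_ucode() above notes that higher levels of the driver may reset the GuC and retry the load on failure; intel_guc_fw_upload() itself just reports INTEL_UC_FIRMWARE_FAIL. A hypothetical retry wrapper, purely illustrative (the real policy lives in the uc init path, not in this file):

	static int example_load_with_retry(struct intel_guc *guc, int attempts)
	{
		int ret = -ENOEXEC;

		while (attempts--) {
			ret = intel_guc_fw_upload(guc);
			if (!ret)
				break;	/* status is now INTEL_UC_FIRMWARE_RUNNING */
			/* a real caller would reset the GuC before retrying */
		}

		return ret;
	}
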
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.h
new file mode 100644
index 000000000000..b5ab639d7259
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2017-2019 Intel Corporation
+ */
+
+#ifndef _INTEL_GUC_FW_H_
+#define _INTEL_GUC_FW_H_
+
+struct intel_guc;
+
+void intel_guc_fw_init_early(struct intel_guc *guc);
+int intel_guc_fw_upload(struct intel_guc *guc);
+
+#endif
diff --git a/drivers/gpu/drm/i915/intel_guc_fwif.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h
index f55f3bc8524d..1d3cdd67ca2f 100644
--- a/drivers/gpu/drm/i915/intel_guc_fwif.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h
@@ -1,28 +1,15 @@
+/* SPDX-License-Identifier: MIT */
/*
- * Copyright © 2014 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
+ * Copyright © 2014-2019 Intel Corporation
*/
+
#ifndef _INTEL_GUC_FWIF_H
#define _INTEL_GUC_FWIF_H
+#include <linux/bits.h>
+#include <linux/compiler.h>
+#include <linux/types.h>
+
#define GUC_CLIENT_PRIORITY_KMD_HIGH 0
#define GUC_CLIENT_PRIORITY_HIGH 1
#define GUC_CLIENT_PRIORITY_KMD_NORMAL 2
@@ -39,13 +26,8 @@
#define GUC_VIDEO_ENGINE2 4
#define GUC_MAX_ENGINES_NUM (GUC_VIDEO_ENGINE2 + 1)
-/*
- * XXX: Beware that Gen9 firmware 32.x uses wrong definition for
- * GUC_MAX_INSTANCES_PER_CLASS (1) but this is harmless for us now
- * as we are not enabling GuC submission mode where this will be used
- */
#define GUC_MAX_ENGINE_CLASSES 5
-#define GUC_MAX_INSTANCES_PER_CLASS 4
+#define GUC_MAX_INSTANCES_PER_CLASS 16
#define GUC_DOORBELL_INVALID 256
@@ -122,76 +104,6 @@
#define GUC_CTL_MAX_DWORDS (SOFT_SCRATCH_COUNT - 2) /* [1..14] */
-/**
- * DOC: GuC Firmware Layout
- *
- * The GuC firmware layout looks like this:
- *
- * +-------------------------------+
- * | uc_css_header |
- * | |
- * | contains major/minor version |
- * +-------------------------------+
- * | uCode |
- * +-------------------------------+
- * | RSA signature |
- * +-------------------------------+
- * | modulus key |
- * +-------------------------------+
- * | exponent val |
- * +-------------------------------+
- *
- * The firmware may or may not have modulus key and exponent data. The header,
- * uCode and RSA signature are must-have components that will be used by driver.
- * Length of each components, which is all in dwords, can be found in header.
- * In the case that modulus and exponent are not present in fw, a.k.a truncated
- * image, the length value still appears in header.
- *
- * Driver will do some basic fw size validation based on the following rules:
- *
- * 1. Header, uCode and RSA are must-have components.
- * 2. All firmware components, if they present, are in the sequence illustrated
- * in the layout table above.
- * 3. Length info of each component can be found in header, in dwords.
- * 4. Modulus and exponent key are not required by driver. They may not appear
- * in fw. So driver will load a truncated firmware in this case.
- *
- * HuC firmware layout is same as GuC firmware.
- * Only HuC version information is saved in a different way.
- */
-
-struct uc_css_header {
- u32 module_type;
- /* header_size includes all non-uCode bits, including css_header, rsa
- * key, modulus key and exponent data. */
- u32 header_size_dw;
- u32 header_version;
- u32 module_id;
- u32 module_vendor;
- u32 date;
-#define CSS_DATE_DAY (0xFF << 0)
-#define CSS_DATE_MONTH (0xFF << 8)
-#define CSS_DATE_YEAR (0xFFFF << 16)
- u32 size_dw; /* uCode plus header_size_dw */
- u32 key_size_dw;
- u32 modulus_size_dw;
- u32 exponent_size_dw;
- u32 time;
-#define CSS_TIME_HOUR (0xFF << 0)
-#define CSS_DATE_MIN (0xFF << 8)
-#define CSS_DATE_SEC (0xFFFF << 16)
- char username[8];
- char buildnumber[12];
- u32 sw_version;
-#define CSS_SW_VERSION_GUC_MAJOR (0xFF << 16)
-#define CSS_SW_VERSION_GUC_MINOR (0xFF << 8)
-#define CSS_SW_VERSION_GUC_PATCH (0xFF << 0)
-#define CSS_SW_VERSION_HUC_MAJOR (0xFFFF << 16)
-#define CSS_SW_VERSION_HUC_MINOR (0xFFFF << 0)
- u32 reserved[14];
- u32 header_info;
-} __packed;
-
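
Although this patch deletes the DOC block and the uc_css_header definition from this header, the layout rules they state (all lengths in dwords; header, uCode and RSA mandatory; modulus and exponent optional in a "truncated" image) are still the clearest description of the format. A hedged reconstruction of the size check those rules imply; this is the editor's illustration, not the driver's exact validation code:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

struct css_sizes {               /* subset of uc_css_header, per the text above */
	uint32_t header_size_dw; /* all non-uCode bits, incl. RSA/modulus/exponent */
	uint32_t size_dw;        /* uCode plus header_size_dw */
	uint32_t modulus_size_dw;
	uint32_t exponent_size_dw;
};

static bool css_size_ok(const struct css_sizes *css, size_t file_size)
{
	size_t full = (size_t)css->size_dw * sizeof(uint32_t);
	size_t truncated = full -
		(size_t)(css->modulus_size_dw + css->exponent_size_dw) *
		sizeof(uint32_t);

	/* Header, uCode and RSA are mandatory; modulus/exponent may be absent. */
	return file_size == full || file_size == truncated;
}
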
/* Work item for submitting workloads into work queue of GuC. */
struct guc_wq_item {
u32 header;
diff --git a/drivers/gpu/drm/i915/intel_guc_log.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c
index e3b83ecb90b5..36332064de9c 100644
--- a/drivers/gpu/drm/i915/intel_guc_log.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c
@@ -1,31 +1,14 @@
+// SPDX-License-Identifier: MIT
/*
- * Copyright © 2014-2017 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
+ * Copyright © 2014-2019 Intel Corporation
*/
#include <linux/debugfs.h>
-#include "intel_guc_log.h"
+#include "gt/intel_gt.h"
#include "i915_drv.h"
+#include "i915_memcpy.h"
+#include "intel_guc_log.h"
static void guc_log_capture_logs(struct intel_guc_log *log);
@@ -209,7 +192,7 @@ static bool guc_check_log_buf_overflow(struct intel_guc_log *log,
log->stats[type].sampled_overflow += 16;
}
- dev_notice_ratelimited(guc_to_i915(log_to_guc(log))->drm.dev,
+ dev_notice_ratelimited(guc_to_gt(log_to_guc(log))->i915->drm.dev,
"GuC log buffer overflow\n");
}
@@ -383,12 +366,13 @@ void intel_guc_log_init_early(struct intel_guc_log *log)
static int guc_log_relay_create(struct intel_guc_log *log)
{
struct intel_guc *guc = log_to_guc(log);
- struct drm_i915_private *dev_priv = guc_to_i915(guc);
+ struct drm_i915_private *dev_priv = guc_to_gt(guc)->i915;
struct rchan *guc_log_relay_chan;
size_t n_subbufs, subbuf_size;
int ret;
lockdep_assert_held(&log->relay.lock);
+ GEM_BUG_ON(!log->vma);
/* Keep the size of sub buffers same as shared log buffer */
subbuf_size = log->vma->size;
@@ -429,7 +413,7 @@ static void guc_log_relay_destroy(struct intel_guc_log *log)
static void guc_log_capture_logs(struct intel_guc_log *log)
{
struct intel_guc *guc = log_to_guc(log);
- struct drm_i915_private *dev_priv = guc_to_i915(guc);
+ struct drm_i915_private *dev_priv = guc_to_gt(guc)->i915;
intel_wakeref_t wakeref;
guc_read_update_log_buffer(log);
@@ -442,6 +426,29 @@ static void guc_log_capture_logs(struct intel_guc_log *log)
guc_action_flush_log_complete(guc);
}
+static u32 __get_default_log_level(struct intel_guc_log *log)
+{
+ /* A negative value means "use platform/config default" */
+ if (i915_modparams.guc_log_level < 0) {
+ return (IS_ENABLED(CONFIG_DRM_I915_DEBUG) ||
+ IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) ?
+ GUC_LOG_LEVEL_MAX : GUC_LOG_LEVEL_NON_VERBOSE;
+ }
+
+ if (i915_modparams.guc_log_level > GUC_LOG_LEVEL_MAX) {
+ DRM_WARN("Incompatible option detected: %s=%d, %s!\n",
+ "guc_log_level", i915_modparams.guc_log_level,
+ "verbosity too high");
+ return (IS_ENABLED(CONFIG_DRM_I915_DEBUG) ||
+ IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) ?
+ GUC_LOG_LEVEL_MAX : GUC_LOG_LEVEL_DISABLED;
+ }
+
+ GEM_BUG_ON(i915_modparams.guc_log_level < GUC_LOG_LEVEL_DISABLED);
+ GEM_BUG_ON(i915_modparams.guc_log_level > GUC_LOG_LEVEL_MAX);
+ return i915_modparams.guc_log_level;
+}
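
__get_default_log_level() folds three policies together: a negative modparam defers to the build type, an over-range value warns and clamps, and anything in range passes through. The same decision expressed as a pure function, with debug_build standing in for the CONFIG_DRM_I915_DEBUG* checks and the GUC_LOG_LEVEL_* constants keeping their meaning:

#include <stdbool.h>

static int pick_log_level(int modparam, bool debug_build,
			  int disabled, int non_verbose, int max)
{
	if (modparam < 0)               /* "use the default" */
		return debug_build ? max : non_verbose;
	if (modparam > max)             /* too verbose: warn and clamp */
		return debug_build ? max : disabled;
	return modparam;                /* in range: use as-is */
}
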
+
int intel_guc_log_create(struct intel_guc_log *log)
{
struct intel_guc *guc = log_to_guc(log);
@@ -481,7 +488,11 @@ int intel_guc_log_create(struct intel_guc_log *log)
log->vma = vma;
- log->level = i915_modparams.guc_log_level;
+ log->level = __get_default_log_level(log);
+ DRM_DEBUG_DRIVER("guc_log_level=%d (%s, verbose:%s, verbosity:%d)\n",
+ log->level, enableddisabled(log->level),
+ yesno(GUC_LOG_LEVEL_IS_VERBOSE(log->level)),
+ GUC_LOG_LEVEL_TO_VERBOSITY(log->level));
return 0;
@@ -498,7 +509,7 @@ void intel_guc_log_destroy(struct intel_guc_log *log)
int intel_guc_log_set_level(struct intel_guc_log *log, u32 level)
{
struct intel_guc *guc = log_to_guc(log);
- struct drm_i915_private *dev_priv = guc_to_i915(guc);
+ struct drm_i915_private *dev_priv = guc_to_gt(guc)->i915;
intel_wakeref_t wakeref;
int ret = 0;
@@ -544,6 +555,9 @@ int intel_guc_log_relay_open(struct intel_guc_log *log)
{
int ret;
+ if (!log->vma)
+ return -ENODEV;
+
mutex_lock(&log->relay.lock);
if (intel_guc_log_relay_enabled(log)) {
@@ -578,7 +592,7 @@ int intel_guc_log_relay_open(struct intel_guc_log *log)
* the flush notification. This means that we need to unconditionally
* flush on relay enabling, since GuC only notifies us once.
*/
- queue_work(log->relay.flush_wq, &log->relay.flush_work);
+ queue_work(system_highpri_wq, &log->relay.flush_work);
return 0;
@@ -593,7 +607,7 @@ out_unlock:
void intel_guc_log_relay_flush(struct intel_guc_log *log)
{
struct intel_guc *guc = log_to_guc(log);
- struct drm_i915_private *i915 = guc_to_i915(guc);
+ struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
intel_wakeref_t wakeref;
/*
@@ -612,10 +626,10 @@ void intel_guc_log_relay_flush(struct intel_guc_log *log)
void intel_guc_log_relay_close(struct intel_guc_log *log)
{
struct intel_guc *guc = log_to_guc(log);
- struct drm_i915_private *i915 = guc_to_i915(guc);
+ struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
guc_log_disable_flush_events(log);
- synchronize_irq(i915->drm.irq);
+ intel_synchronize_irq(i915);
flush_work(&log->relay.flush_work);
@@ -628,5 +642,5 @@ void intel_guc_log_relay_close(struct intel_guc_log *log)
void intel_guc_log_handle_flush_event(struct intel_guc_log *log)
{
- queue_work(log->relay.flush_wq, &log->relay.flush_work);
+ queue_work(system_highpri_wq, &log->relay.flush_work);
}
diff --git a/drivers/gpu/drm/i915/intel_guc_log.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.h
index 7bc763f10c03..6f764879acb1 100644
--- a/drivers/gpu/drm/i915/intel_guc_log.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.h
@@ -1,25 +1,6 @@
+/* SPDX-License-Identifier: MIT */
/*
- * Copyright © 2014-2017 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
+ * Copyright © 2014-2019 Intel Corporation
*/
#ifndef _INTEL_GUC_LOG_H_
@@ -66,7 +47,6 @@ struct intel_guc_log {
struct i915_vma *vma;
struct {
void *buf_addr;
- struct workqueue_struct *flush_wq;
struct work_struct flush_work;
struct rchan *channel;
struct mutex lock;
diff --git a/drivers/gpu/drm/i915/intel_guc_reg.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_reg.h
index a214f8b71929..edf194d23c6b 100644
--- a/drivers/gpu/drm/i915/intel_guc_reg.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_reg.h
@@ -1,29 +1,16 @@
+/* SPDX-License-Identifier: MIT */
/*
- * Copyright © 2014 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
+ * Copyright © 2014-2019 Intel Corporation
*/
+
#ifndef _INTEL_GUC_REG_H_
#define _INTEL_GUC_REG_H_
+#include <linux/compiler.h>
+#include <linux/types.h>
+
+#include "i915_reg.h"
+
/* Definitions of GuC H/W registers, bits, etc */
#define GUC_STATUS _MMIO(0xc000)
@@ -37,6 +24,7 @@
#define GS_UKERNEL_MASK (0xFF << GS_UKERNEL_SHIFT)
#define GS_UKERNEL_LAPIC_DONE (0x30 << GS_UKERNEL_SHIFT)
#define GS_UKERNEL_DPC_ERROR (0x60 << GS_UKERNEL_SHIFT)
+#define GS_UKERNEL_EXCEPTION (0x70 << GS_UKERNEL_SHIFT)
#define GS_UKERNEL_READY (0xF0 << GS_UKERNEL_SHIFT)
#define GS_MIA_SHIFT 16
#define GS_MIA_MASK (0x07 << GS_MIA_SHIFT)
@@ -135,21 +123,21 @@ struct guc_doorbell_info {
#define GUC_PM_P24C_IER _MMIO(0xC55C)
/* GuC Interrupt Vector */
-#define GEN11_GUC_INTR_GUC2HOST (1 << 15)
-#define GEN11_GUC_INTR_EXEC_ERROR (1 << 14)
-#define GEN11_GUC_INTR_DISPLAY_EVENT (1 << 13)
-#define GEN11_GUC_INTR_SEM_SIG (1 << 12)
-#define GEN11_GUC_INTR_IOMMU2GUC (1 << 11)
-#define GEN11_GUC_INTR_DOORBELL_RANG (1 << 10)
-#define GEN11_GUC_INTR_DMA_DONE (1 << 9)
-#define GEN11_GUC_INTR_FATAL_ERROR (1 << 8)
-#define GEN11_GUC_INTR_NOTIF_ERROR (1 << 7)
-#define GEN11_GUC_INTR_SW_INT_6 (1 << 6)
-#define GEN11_GUC_INTR_SW_INT_5 (1 << 5)
-#define GEN11_GUC_INTR_SW_INT_4 (1 << 4)
-#define GEN11_GUC_INTR_SW_INT_3 (1 << 3)
-#define GEN11_GUC_INTR_SW_INT_2 (1 << 2)
-#define GEN11_GUC_INTR_SW_INT_1 (1 << 1)
-#define GEN11_GUC_INTR_SW_INT_0 (1 << 0)
+#define GUC_INTR_GUC2HOST BIT(15)
+#define GUC_INTR_EXEC_ERROR BIT(14)
+#define GUC_INTR_DISPLAY_EVENT BIT(13)
+#define GUC_INTR_SEM_SIG BIT(12)
+#define GUC_INTR_IOMMU2GUC BIT(11)
+#define GUC_INTR_DOORBELL_RANG BIT(10)
+#define GUC_INTR_DMA_DONE BIT(9)
+#define GUC_INTR_FATAL_ERROR BIT(8)
+#define GUC_INTR_NOTIF_ERROR BIT(7)
+#define GUC_INTR_SW_INT_6 BIT(6)
+#define GUC_INTR_SW_INT_5 BIT(5)
+#define GUC_INTR_SW_INT_4 BIT(4)
+#define GUC_INTR_SW_INT_3 BIT(3)
+#define GUC_INTR_SW_INT_2 BIT(2)
+#define GUC_INTR_SW_INT_1 BIT(1)
+#define GUC_INTR_SW_INT_0 BIT(0)
#endif
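
This hunk also trades open-coded (1 << n) shifts for BIT() from the newly included linux/bits.h. BIT(n) is, in essence, an unsigned-long shift, which reads better and sidesteps the undefined behaviour of shifting a signed int into bit 31. Roughly:

/* In spirit, what linux/bits.h provides (exact macro body hedged): */
#define BIT_SKETCH(n)   (1UL << (n))

/* (1 << 31) overflows a signed 32-bit int and is undefined behaviour;
 * BIT_SKETCH(31) is a well-defined unsigned-long mask. */
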
diff --git a/drivers/gpu/drm/i915/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
index db531ebc7704..f325d3dd564f 100644
--- a/drivers/gpu/drm/i915/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -1,38 +1,27 @@
+// SPDX-License-Identifier: MIT
/*
* Copyright © 2014 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
*/
#include <linux/circ_buf.h>
-#include "gt/intel_engine_pm.h"
-#include "gt/intel_lrc_reg.h"
-#include "gt/intel_context.h"
#include "gem/i915_gem_context.h"
+#include "gt/intel_context.h"
+#include "gt/intel_engine_pm.h"
+#include "gt/intel_gt.h"
+#include "gt/intel_gt_pm.h"
+#include "gt/intel_lrc_reg.h"
#include "intel_guc_submission.h"
+
#include "i915_drv.h"
+#include "i915_trace.h"
-#define GUC_PREEMPT_FINISHED 0x1
+enum {
+ GUC_PREEMPT_NONE = 0,
+ GUC_PREEMPT_INPROGRESS,
+ GUC_PREEMPT_FINISHED,
+};
#define GUC_PREEMPT_BREADCRUMB_DWORDS 0x8
#define GUC_PREEMPT_BREADCRUMB_BYTES \
(sizeof(u32) * GUC_PREEMPT_BREADCRUMB_DWORDS)
@@ -42,11 +31,10 @@
*
* GuC client:
 * An intel_guc_client refers to a submission path through GuC. Currently, there
- * are two clients. One of them (the execbuf_client) is charged with all
- * submissions to the GuC, the other one (preempt_client) is responsible for
- * preempting the execbuf_client. This struct is the owner of a doorbell, a
- * process descriptor and a workqueue (all of them inside a single gem object
- * that contains all required pages for these elements).
+ * is only one client, which is charged with all submissions to the GuC. This
+ * struct is the owner of a doorbell, a process descriptor and a workqueue (all
+ * of them inside a single gem object that contains all required pages for these
+ * elements).
*
* GuC stage descriptor:
* During initialization, the driver allocates a static pool of 1024 such
@@ -84,12 +72,6 @@
*
*/
-static inline u32 intel_hws_preempt_done_address(struct intel_engine_cs *engine)
-{
- return (i915_ggtt_offset(engine->status_page.vma) +
- I915_GEM_HWS_PREEMPT_ADDR);
-}
-
static inline struct i915_priolist *to_priolist(struct rb_node *rb)
{
return rb_entry(rb, struct i915_priolist, node);
@@ -203,10 +185,10 @@ static struct guc_doorbell_info *__get_doorbell(struct intel_guc_client *client)
static bool __doorbell_valid(struct intel_guc *guc, u16 db_id)
{
- struct drm_i915_private *dev_priv = guc_to_i915(guc);
+ struct intel_uncore *uncore = guc_to_gt(guc)->uncore;
GEM_BUG_ON(db_id >= GUC_NUM_DOORBELLS);
- return I915_READ(GEN8_DRBREGL(db_id)) & GEN8_DRB_VALID;
+ return intel_uncore_read(uncore, GEN8_DRBREGL(db_id)) & GEN8_DRB_VALID;
}
static void __init_doorbell(struct intel_guc_client *client)
@@ -366,10 +348,7 @@ static void guc_stage_desc_pool_destroy(struct intel_guc *guc)
static void guc_stage_desc_init(struct intel_guc_client *client)
{
struct intel_guc *guc = client->guc;
- struct i915_gem_context *ctx = client->owner;
- struct i915_gem_engines_iter it;
struct guc_stage_desc *desc;
- struct intel_context *ce;
u32 gfx_addr;
desc = __get_stage_desc(client);
@@ -383,55 +362,6 @@ static void guc_stage_desc_init(struct intel_guc_client *client)
desc->priority = client->priority;
desc->db_id = client->doorbell_id;
- for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
- struct guc_execlist_context *lrc;
-
- if (!(ce->engine->mask & client->engines))
- continue;
-
- /* TODO: We have a design issue to be solved here. Only when we
- * receive the first batch, we know which engine is used by the
- * user. But here GuC expects the lrc and ring to be pinned. It
- * is not an issue for default context, which is the only one
- * for now who owns a GuC client. But for future owner of GuC
- * client, need to make sure lrc is pinned prior to enter here.
- */
- if (!ce->state)
- break; /* XXX: continue? */
-
- /*
- * XXX: When this is a GUC_STAGE_DESC_ATTR_KERNEL client (proxy
- * submission or, in other words, not using a direct submission
- * model) the KMD's LRCA is not used for any work submission.
- * Instead, the GuC uses the LRCA of the user mode context (see
- * guc_add_request below).
- */
- lrc = &desc->lrc[ce->engine->guc_id];
- lrc->context_desc = lower_32_bits(ce->lrc_desc);
-
- /* The state page is after PPHWSP */
- lrc->ring_lrca = intel_guc_ggtt_offset(guc, ce->state) +
- LRC_STATE_PN * PAGE_SIZE;
-
- /* XXX: In direct submission, the GuC wants the HW context id
- * here. In proxy submission, it wants the stage id
- */
- lrc->context_id = (client->stage_id << GUC_ELC_CTXID_OFFSET) |
- (ce->engine->guc_id << GUC_ELC_ENGINE_OFFSET);
-
- lrc->ring_begin = intel_guc_ggtt_offset(guc, ce->ring->vma);
- lrc->ring_end = lrc->ring_begin + ce->ring->size - 1;
- lrc->ring_next_free_location = lrc->ring_begin;
- lrc->ring_current_tail_pointer_value = 0;
-
- desc->engines_used |= BIT(ce->engine->guc_id);
- }
- i915_gem_context_unlock_engines(ctx);
-
- DRM_DEBUG_DRIVER("Host engines 0x%x => GuC engines used 0x%x\n",
- client->engines, desc->engines_used);
- WARN_ON(desc->engines_used == 0);
-
/*
* The doorbell, process descriptor, and workqueue are all parts
* of the client object, which the GuC will reference via the GGTT
@@ -537,15 +467,9 @@ static void guc_add_request(struct intel_guc *guc, struct i915_request *rq)
u32 ctx_desc = lower_32_bits(rq->hw_context->lrc_desc);
u32 ring_tail = intel_ring_set_tail(rq->ring, rq->tail) / sizeof(u64);
- spin_lock(&client->wq_lock);
-
guc_wq_item_append(client, engine->guc_id, ctx_desc,
ring_tail, rq->fence.seqno);
guc_ring_doorbell(client);
-
- client->submissions[engine->id] += 1;
-
- spin_unlock(&client->wq_lock);
}
/*
@@ -563,207 +487,78 @@ static void flush_ggtt_writes(struct i915_vma *vma)
intel_uncore_posting_read_fw(&i915->uncore, GUC_STATUS);
}
-static void inject_preempt_context(struct work_struct *work)
+static void guc_submit(struct intel_engine_cs *engine,
+ struct i915_request **out,
+ struct i915_request **end)
{
- struct guc_preempt_work *preempt_work =
- container_of(work, typeof(*preempt_work), work);
- struct intel_engine_cs *engine = preempt_work->engine;
- struct intel_guc *guc = container_of(preempt_work, typeof(*guc),
- preempt_work[engine->id]);
- struct intel_guc_client *client = guc->preempt_client;
- struct guc_stage_desc *stage_desc = __get_stage_desc(client);
- struct intel_context *ce = engine->preempt_context;
- u32 data[7];
-
- if (!ce->ring->emit) { /* recreate upon load/resume */
- u32 addr = intel_hws_preempt_done_address(engine);
- u32 *cs;
-
- cs = ce->ring->vaddr;
- if (engine->class == RENDER_CLASS) {
- cs = gen8_emit_ggtt_write_rcs(cs,
- GUC_PREEMPT_FINISHED,
- addr,
- PIPE_CONTROL_CS_STALL);
- } else {
- cs = gen8_emit_ggtt_write(cs,
- GUC_PREEMPT_FINISHED,
- addr,
- 0);
- *cs++ = MI_NOOP;
- *cs++ = MI_NOOP;
- }
- *cs++ = MI_USER_INTERRUPT;
- *cs++ = MI_NOOP;
-
- ce->ring->emit = GUC_PREEMPT_BREADCRUMB_BYTES;
- GEM_BUG_ON((void *)cs - ce->ring->vaddr != ce->ring->emit);
-
- flush_ggtt_writes(ce->ring->vma);
- }
+ struct intel_guc *guc = &engine->gt->uc.guc;
+ struct intel_guc_client *client = guc->execbuf_client;
- spin_lock_irq(&client->wq_lock);
- guc_wq_item_append(client, engine->guc_id, lower_32_bits(ce->lrc_desc),
- GUC_PREEMPT_BREADCRUMB_BYTES / sizeof(u64), 0);
- spin_unlock_irq(&client->wq_lock);
+ spin_lock(&client->wq_lock);
- /*
- * If GuC firmware performs an engine reset while that engine had
- * a preemption pending, it will set the terminated attribute bit
- * on our preemption stage descriptor. GuC firmware retains all
- * pending work items for a high-priority GuC client, unlike the
- * normal-priority GuC client where work items are dropped. It
- * wants to make sure the preempt-to-idle work doesn't run when
- * scheduling resumes, and uses this bit to inform its scheduler
- * and presumably us as well. Our job is to clear it for the next
- * preemption after reset, otherwise that and future preemptions
- * will never complete. We'll just clear it every time.
- */
- stage_desc->attribute &= ~GUC_STAGE_DESC_ATTR_TERMINATED;
-
- data[0] = INTEL_GUC_ACTION_REQUEST_PREEMPTION;
- data[1] = client->stage_id;
- data[2] = INTEL_GUC_PREEMPT_OPTION_DROP_WORK_Q |
- INTEL_GUC_PREEMPT_OPTION_DROP_SUBMIT_Q;
- data[3] = engine->guc_id;
- data[4] = guc->execbuf_client->priority;
- data[5] = guc->execbuf_client->stage_id;
- data[6] = intel_guc_ggtt_offset(guc, guc->shared_data);
-
- if (WARN_ON(intel_guc_send(guc, data, ARRAY_SIZE(data)))) {
- execlists_clear_active(&engine->execlists,
- EXECLISTS_ACTIVE_PREEMPT);
- tasklet_schedule(&engine->execlists.tasklet);
- }
+ do {
+ struct i915_request *rq = *out++;
- (void)I915_SELFTEST_ONLY(engine->execlists.preempt_hang.count++);
-}
+ flush_ggtt_writes(rq->ring->vma);
+ guc_add_request(guc, rq);
+ } while (out != end);
-/*
- * We're using user interrupt and HWSP value to mark that preemption has
- * finished and GPU is idle. Normally, we could unwind and continue similar to
- * execlists submission path. Unfortunately, with GuC we also need to wait for
- * it to finish its own postprocessing, before attempting to submit. Otherwise
- * GuC may silently ignore our submissions, and thus we risk losing request at
- * best, executing out-of-order and causing kernel panic at worst.
- */
-#define GUC_PREEMPT_POSTPROCESS_DELAY_MS 10
-static void wait_for_guc_preempt_report(struct intel_engine_cs *engine)
-{
- struct intel_guc *guc = &engine->i915->guc;
- struct guc_shared_ctx_data *data = guc->shared_data_vaddr;
- struct guc_ctx_report *report =
- &data->preempt_ctx_report[engine->guc_id];
-
- if (wait_for_atomic(report->report_return_status ==
- INTEL_GUC_REPORT_STATUS_COMPLETE,
- GUC_PREEMPT_POSTPROCESS_DELAY_MS))
- DRM_ERROR("Timed out waiting for GuC preemption report\n");
- /*
- * GuC is expecting that we're also going to clear the affected context
- * counter, let's also reset the return status to not depend on GuC
- * resetting it after recieving another preempt action
- */
- report->affected_count = 0;
- report->report_return_status = INTEL_GUC_REPORT_STATUS_UNKNOWN;
+ spin_unlock(&client->wq_lock);
}
-static void complete_preempt_context(struct intel_engine_cs *engine)
+static inline int rq_prio(const struct i915_request *rq)
{
- struct intel_engine_execlists *execlists = &engine->execlists;
-
- GEM_BUG_ON(!execlists_is_active(execlists, EXECLISTS_ACTIVE_PREEMPT));
-
- if (inject_preempt_hang(execlists))
- return;
-
- execlists_cancel_port_requests(execlists);
- execlists_unwind_incomplete_requests(execlists);
-
- wait_for_guc_preempt_report(engine);
- intel_write_status_page(engine, I915_GEM_HWS_PREEMPT, 0);
+ return rq->sched.attr.priority | __NO_PREEMPTION;
}
-/**
- * guc_submit() - Submit commands through GuC
- * @engine: engine associated with the commands
- *
- * The only error here arises if the doorbell hardware isn't functioning
- * as expected, which really shouln't happen.
- */
-static void guc_submit(struct intel_engine_cs *engine)
+static struct i915_request *schedule_in(struct i915_request *rq, int idx)
{
- struct intel_guc *guc = &engine->i915->guc;
- struct intel_engine_execlists * const execlists = &engine->execlists;
- struct execlist_port *port = execlists->port;
- unsigned int n;
-
- for (n = 0; n < execlists_num_ports(execlists); n++) {
- struct i915_request *rq;
- unsigned int count;
-
- rq = port_unpack(&port[n], &count);
- if (rq && count == 0) {
- port_set(&port[n], port_pack(rq, ++count));
-
- flush_ggtt_writes(rq->ring->vma);
+ trace_i915_request_in(rq, idx);
- guc_add_request(guc, rq);
- }
- }
-}
-
-static void port_assign(struct execlist_port *port, struct i915_request *rq)
-{
- GEM_BUG_ON(port_isset(port));
+ /*
+ * Currently we are not tracking the rq->context being inflight
+ * (ce->inflight = rq->engine). It is only used by the execlists
+	 * backend at the moment; a similar counting strategy would be
+ * required if we generalise the inflight tracking.
+ */
- port_set(port, i915_request_get(rq));
+ intel_gt_pm_get(rq->engine->gt);
+ return i915_request_get(rq);
}
-static inline int rq_prio(const struct i915_request *rq)
+static void schedule_out(struct i915_request *rq)
{
- return rq->sched.attr.priority;
-}
+ trace_i915_request_out(rq);
-static inline int port_prio(const struct execlist_port *port)
-{
- return rq_prio(port_request(port)) | __NO_PREEMPTION;
+ intel_gt_pm_put(rq->engine->gt);
+ i915_request_put(rq);
}
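
As the comment in schedule_in() notes, each request's stay in the inflight array is bracketed by a GT wakeref and a request reference rather than ce->inflight tracking. The pairing, reduced to a compilable sketch with hypothetical stubs in place of the i915 calls:

/* Hypothetical stubs standing in for intel_gt_pm_get/put() and
 * i915_request_get/put(); only the pairing matters here. */
static void gt_pm_get(void) { /* keep the GT awake */ }
static void gt_pm_put(void) { /* drop the wakeref */ }

struct req_sketch { int refcount; };

static struct req_sketch *sched_in_sketch(struct req_sketch *rq)
{
	gt_pm_get();
	rq->refcount++;         /* i915_request_get() analogue */
	return rq;
}

static void sched_out_sketch(struct req_sketch *rq)
{
	gt_pm_put();
	rq->refcount--;         /* i915_request_put() analogue */
}
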
-static bool __guc_dequeue(struct intel_engine_cs *engine)
+static void __guc_dequeue(struct intel_engine_cs *engine)
{
struct intel_engine_execlists * const execlists = &engine->execlists;
- struct execlist_port *port = execlists->port;
- struct i915_request *last = NULL;
- const struct execlist_port * const last_port =
- &execlists->port[execlists->port_mask];
+ struct i915_request **first = execlists->inflight;
+ struct i915_request ** const last_port = first + execlists->port_mask;
+ struct i915_request *last = first[0];
+ struct i915_request **port;
bool submit = false;
struct rb_node *rb;
lockdep_assert_held(&engine->active.lock);
- if (port_isset(port)) {
- if (intel_engine_has_preemption(engine)) {
- struct guc_preempt_work *preempt_work =
- &engine->i915->guc.preempt_work[engine->id];
- int prio = execlists->queue_priority_hint;
-
- if (i915_scheduler_need_preempt(prio,
- port_prio(port))) {
- execlists_set_active(execlists,
- EXECLISTS_ACTIVE_PREEMPT);
- queue_work(engine->i915->guc.preempt_wq,
- &preempt_work->work);
- return false;
- }
- }
+ if (last) {
+ if (*++first)
+ return;
- port++;
- if (port_isset(port))
- return false;
+ last = NULL;
}
- GEM_BUG_ON(port_isset(port));
+ /*
+ * We write directly into the execlists->inflight queue and don't use
+ * the execlists->pending queue, as we don't have a distinct switch
+ * event.
+ */
+ port = first;
while ((rb = rb_first_cached(&execlists->queue))) {
struct i915_priolist *p = to_priolist(rb);
struct i915_request *rq, *rn;
@@ -774,18 +569,15 @@ static bool __guc_dequeue(struct intel_engine_cs *engine)
if (port == last_port)
goto done;
- if (submit)
- port_assign(port, last);
+ *port = schedule_in(last,
+ port - execlists->inflight);
port++;
}
list_del_init(&rq->sched.link);
-
__i915_request_submit(rq);
- trace_i915_request_in(rq, port_index(port, execlists));
-
- last = rq;
submit = true;
+ last = rq;
}
rb_erase_cached(&p->node, &execlists->queue);
@@ -794,58 +586,36 @@ static bool __guc_dequeue(struct intel_engine_cs *engine)
done:
execlists->queue_priority_hint =
rb ? to_priolist(rb)->priority : INT_MIN;
- if (submit)
- port_assign(port, last);
- if (last)
- execlists_user_begin(execlists, execlists->port);
-
- /* We must always keep the beast fed if we have work piled up */
- GEM_BUG_ON(port_isset(execlists->port) &&
- !execlists_is_active(execlists, EXECLISTS_ACTIVE_USER));
- GEM_BUG_ON(rb_first_cached(&execlists->queue) &&
- !port_isset(execlists->port));
-
- return submit;
-}
-
-static void guc_dequeue(struct intel_engine_cs *engine)
-{
- if (__guc_dequeue(engine))
- guc_submit(engine);
+ if (submit) {
+ *port = schedule_in(last, port - execlists->inflight);
+ *++port = NULL;
+ guc_submit(engine, first, port);
+ }
+ execlists->active = execlists->inflight;
}
static void guc_submission_tasklet(unsigned long data)
{
struct intel_engine_cs * const engine = (struct intel_engine_cs *)data;
struct intel_engine_execlists * const execlists = &engine->execlists;
- struct execlist_port *port = execlists->port;
- struct i915_request *rq;
+ struct i915_request **port, *rq;
unsigned long flags;
spin_lock_irqsave(&engine->active.lock, flags);
- rq = port_request(port);
- while (rq && i915_request_completed(rq)) {
- trace_i915_request_out(rq);
- i915_request_put(rq);
-
- port = execlists_port_complete(execlists, port);
- if (port_isset(port)) {
- execlists_user_begin(execlists, port);
- rq = port_request(port);
- } else {
- execlists_user_end(execlists);
- rq = NULL;
- }
- }
+ for (port = execlists->inflight; (rq = *port); port++) {
+ if (!i915_request_completed(rq))
+ break;
- if (execlists_is_active(execlists, EXECLISTS_ACTIVE_PREEMPT) &&
- intel_read_status_page(engine, I915_GEM_HWS_PREEMPT) ==
- GUC_PREEMPT_FINISHED)
- complete_preempt_context(engine);
+ schedule_out(rq);
+ }
+ if (port != execlists->inflight) {
+ int idx = port - execlists->inflight;
+ int rem = ARRAY_SIZE(execlists->inflight) - idx;
+ memmove(execlists->inflight, port, rem * sizeof(*port));
+ }
- if (!execlists_is_active(execlists, EXECLISTS_ACTIVE_PREEMPT))
- guc_dequeue(engine);
+ __guc_dequeue(engine);
spin_unlock_irqrestore(&engine->active.lock, flags);
}
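
The rewritten tasklet walks the inflight array from the front, retiring completed requests, then slides the survivors (and the trailing NULLs) down with memmove() so that slot 0 always holds the oldest still-running request. The same idiom, standalone: is_done() and retire() are stand-ins for i915_request_completed() and schedule_out(), and NPORTS is illustrative rather than the real per-engine port count.

#include <stddef.h>
#include <string.h>

#define NPORTS 2                /* illustrative; array has a NULL sentinel */

struct req { int done; };

static int is_done(const struct req *rq) { return rq->done; }
static void retire(struct req *rq) { (void)rq; /* drop references here */ }

static void compact_inflight(struct req *inflight[NPORTS + 1])
{
	struct req **port, *rq;

	for (port = inflight; (rq = *port); port++) {
		if (!is_done(rq))
			break;
		retire(rq);
	}

	if (port != inflight) {
		/* slide the running requests (and trailing NULLs) down */
		size_t rem = (NPORTS + 1) - (port - inflight);

		memmove(inflight, port, rem * sizeof(*port));
	}
}
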
@@ -866,16 +636,19 @@ static void guc_reset_prepare(struct intel_engine_cs *engine)
* prevents the race.
*/
__tasklet_disable_sync_once(&execlists->tasklet);
+}
- /*
- * We're using worker to queue preemption requests from the tasklet in
- * GuC submission mode.
- * Even though tasklet was disabled, we may still have a worker queued.
- * Let's make sure that all workers scheduled before disabling the
- * tasklet are completed before continuing with the reset.
- */
- if (engine->i915->guc.preempt_wq)
- flush_workqueue(engine->i915->guc.preempt_wq);
+static void
+cancel_port_requests(struct intel_engine_execlists * const execlists)
+{
+ struct i915_request * const *port, *rq;
+
+ /* Note we are only using the inflight and not the pending queue */
+
+ for (port = execlists->active; (rq = *port); port++)
+ schedule_out(rq);
+ execlists->active =
+ memset(execlists->inflight, 0, sizeof(execlists->inflight));
}
static void guc_reset(struct intel_engine_cs *engine, bool stalled)
@@ -886,7 +659,7 @@ static void guc_reset(struct intel_engine_cs *engine, bool stalled)
spin_lock_irqsave(&engine->active.lock, flags);
- execlists_cancel_port_requests(execlists);
+ cancel_port_requests(execlists);
/* Push back any incomplete requests for replay after the reset. */
rq = execlists_unwind_incomplete_requests(execlists);
@@ -896,7 +669,7 @@ static void guc_reset(struct intel_engine_cs *engine, bool stalled)
if (!i915_request_started(rq))
stalled = false;
- i915_reset_request(rq, stalled);
+ __i915_request_reset(rq, stalled);
intel_lr_context_reset(engine, rq->hw_context, rq->head, stalled);
out_unlock:
@@ -929,7 +702,7 @@ static void guc_cancel_requests(struct intel_engine_cs *engine)
spin_lock_irqsave(&engine->active.lock, flags);
/* Cancel the requests on the HW and clear the ELSP tracker. */
- execlists_cancel_port_requests(execlists);
+ cancel_port_requests(execlists);
/* Mark all executing requests as skipped. */
list_for_each_entry(rq, &engine->active.requests, sched.link) {
@@ -959,7 +732,6 @@ static void guc_cancel_requests(struct intel_engine_cs *engine)
execlists->queue_priority_hint = INT_MIN;
execlists->queue = RB_ROOT_CACHED;
- GEM_BUG_ON(port_isset(execlists->port));
spin_unlock_irqrestore(&engine->active.lock, flags);
}
@@ -1014,25 +786,18 @@ static bool guc_verify_doorbells(struct intel_guc *guc)
/**
* guc_client_alloc() - Allocate an intel_guc_client
- * @dev_priv: driver private data structure
- * @engines: The set of engines to enable for this client
+ * @guc: the intel_guc structure
 * @priority: one of four priority levels: _CRITICAL, _HIGH, _NORMAL and _LOW.
 * The kernel client that replaces ExecList submission is created with
 * NORMAL priority. A scheduler client can be HIGH priority, while a
 * preemption context can use CRITICAL.
- * @ctx: the context that owns the client (we use the default render
- * context)
*
* Return: An intel_guc_client object if success, else NULL.
*/
static struct intel_guc_client *
-guc_client_alloc(struct drm_i915_private *dev_priv,
- u32 engines,
- u32 priority,
- struct i915_gem_context *ctx)
+guc_client_alloc(struct intel_guc *guc, u32 priority)
{
struct intel_guc_client *client;
- struct intel_guc *guc = &dev_priv->guc;
struct i915_vma *vma;
void *vaddr;
int ret;
@@ -1042,8 +807,6 @@ guc_client_alloc(struct drm_i915_private *dev_priv,
return ERR_PTR(-ENOMEM);
client->guc = guc;
- client->owner = ctx;
- client->engines = engines;
client->priority = priority;
client->doorbell_id = GUC_DOORBELL_INVALID;
spin_lock_init(&client->wq_lock);
@@ -1088,8 +851,8 @@ guc_client_alloc(struct drm_i915_private *dev_priv,
else
client->proc_desc_offset = (GUC_DB_SIZE / 2);
- DRM_DEBUG_DRIVER("new priority %u client %p for engine(s) 0x%x: stage_id %u\n",
- priority, client, client->engines, client->stage_id);
+ DRM_DEBUG_DRIVER("new priority %u client %p: stage_id %u\n",
+ priority, client, client->stage_id);
DRM_DEBUG_DRIVER("doorbell id %u, cacheline offset 0x%lx\n",
client->doorbell_id, client->doorbell_offset);
@@ -1129,36 +892,17 @@ static inline bool ctx_save_restore_disabled(struct intel_context *ce)
static int guc_clients_create(struct intel_guc *guc)
{
- struct drm_i915_private *dev_priv = guc_to_i915(guc);
struct intel_guc_client *client;
GEM_BUG_ON(guc->execbuf_client);
- GEM_BUG_ON(guc->preempt_client);
- client = guc_client_alloc(dev_priv,
- INTEL_INFO(dev_priv)->engine_mask,
- GUC_CLIENT_PRIORITY_KMD_NORMAL,
- dev_priv->kernel_context);
+ client = guc_client_alloc(guc, GUC_CLIENT_PRIORITY_KMD_NORMAL);
if (IS_ERR(client)) {
DRM_ERROR("Failed to create GuC client for submission!\n");
return PTR_ERR(client);
}
guc->execbuf_client = client;
- if (dev_priv->preempt_context) {
- client = guc_client_alloc(dev_priv,
- INTEL_INFO(dev_priv)->engine_mask,
- GUC_CLIENT_PRIORITY_KMD_HIGH,
- dev_priv->preempt_context);
- if (IS_ERR(client)) {
- DRM_ERROR("Failed to create GuC client for preemption!\n");
- guc_client_free(guc->execbuf_client);
- guc->execbuf_client = NULL;
- return PTR_ERR(client);
- }
- guc->preempt_client = client;
- }
-
return 0;
}
@@ -1166,10 +910,6 @@ static void guc_clients_destroy(struct intel_guc *guc)
{
struct intel_guc_client *client;
- client = fetch_and_zero(&guc->preempt_client);
- if (client)
- guc_client_free(client);
-
client = fetch_and_zero(&guc->execbuf_client);
if (client)
guc_client_free(client);
@@ -1201,7 +941,7 @@ static void __guc_client_disable(struct intel_guc_client *client)
* the case, instead of trying (in vain) to communicate with it, let's
* just cleanup the doorbell HW and our internal state.
*/
- if (intel_guc_is_loaded(client->guc))
+ if (intel_guc_is_running(client->guc))
destroy_doorbell(client);
else
__fini_doorbell(client);
@@ -1212,28 +952,11 @@ static void __guc_client_disable(struct intel_guc_client *client)
static int guc_clients_enable(struct intel_guc *guc)
{
- int ret;
-
- ret = __guc_client_enable(guc->execbuf_client);
- if (ret)
- return ret;
-
- if (guc->preempt_client) {
- ret = __guc_client_enable(guc->preempt_client);
- if (ret) {
- __guc_client_disable(guc->execbuf_client);
- return ret;
- }
- }
-
- return 0;
+ return __guc_client_enable(guc->execbuf_client);
}
static void guc_clients_disable(struct intel_guc *guc)
{
- if (guc->preempt_client)
- __guc_client_disable(guc->preempt_client);
-
if (guc->execbuf_client)
__guc_client_disable(guc->execbuf_client);
}
@@ -1244,9 +967,6 @@ static void guc_clients_disable(struct intel_guc *guc)
*/
int intel_guc_submission_init(struct intel_guc *guc)
{
- struct drm_i915_private *dev_priv = guc_to_i915(guc);
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
int ret;
if (guc->stage_desc_pool)
@@ -1266,11 +986,6 @@ int intel_guc_submission_init(struct intel_guc *guc)
if (ret)
goto err_pool;
- for_each_engine(engine, dev_priv, id) {
- guc->preempt_work[id].engine = engine;
- INIT_WORK(&guc->preempt_work[id].work, inject_preempt_context);
- }
-
return 0;
err_pool:
@@ -1280,13 +995,6 @@ err_pool:
void intel_guc_submission_fini(struct intel_guc *guc)
{
- struct drm_i915_private *dev_priv = guc_to_i915(guc);
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
-
- for_each_engine(engine, dev_priv, id)
- cancel_work_sync(&guc->preempt_work[id].work);
-
guc_clients_destroy(guc);
WARN_ON(!guc_verify_doorbells(guc));
@@ -1294,9 +1002,10 @@ void intel_guc_submission_fini(struct intel_guc *guc)
guc_stage_desc_pool_destroy(guc);
}
-static void guc_interrupts_capture(struct drm_i915_private *dev_priv)
+static void guc_interrupts_capture(struct intel_gt *gt)
{
- struct intel_rps *rps = &dev_priv->gt_pm.rps;
+ struct intel_rps *rps = &gt->i915->gt_pm.rps;
+ struct intel_uncore *uncore = gt->uncore;
struct intel_engine_cs *engine;
enum intel_engine_id id;
int irqs;
@@ -1305,16 +1014,16 @@ static void guc_interrupts_capture(struct drm_i915_private *dev_priv)
* to GuC
*/
irqs = _MASKED_BIT_ENABLE(GFX_INTERRUPT_STEERING);
- for_each_engine(engine, dev_priv, id)
+ for_each_engine(engine, gt->i915, id)
ENGINE_WRITE(engine, RING_MODE_GEN7, irqs);
/* route USER_INTERRUPT to Host, all others are sent to GuC. */
irqs = GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT;
/* These three registers have the same bit definitions */
- I915_WRITE(GUC_BCS_RCS_IER, ~irqs);
- I915_WRITE(GUC_VCS2_VCS1_IER, ~irqs);
- I915_WRITE(GUC_WD_VECS_IER, ~irqs);
+ intel_uncore_write(uncore, GUC_BCS_RCS_IER, ~irqs);
+ intel_uncore_write(uncore, GUC_VCS2_VCS1_IER, ~irqs);
+ intel_uncore_write(uncore, GUC_WD_VECS_IER, ~irqs);
/*
* The REDIRECT_TO_GUC bit of the PMINTRMSK register directs all
@@ -1339,9 +1048,10 @@ static void guc_interrupts_capture(struct drm_i915_private *dev_priv)
rps->pm_intrmsk_mbz &= ~GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC;
}
-static void guc_interrupts_release(struct drm_i915_private *dev_priv)
+static void guc_interrupts_release(struct intel_gt *gt)
{
- struct intel_rps *rps = &dev_priv->gt_pm.rps;
+ struct intel_rps *rps = &gt->i915->gt_pm.rps;
+ struct intel_uncore *uncore = gt->uncore;
struct intel_engine_cs *engine;
enum intel_engine_id id;
int irqs;
@@ -1352,31 +1062,18 @@ static void guc_interrupts_release(struct drm_i915_private *dev_priv)
*/
irqs = _MASKED_FIELD(GFX_FORWARD_VBLANK_MASK, GFX_FORWARD_VBLANK_NEVER);
irqs |= _MASKED_BIT_DISABLE(GFX_INTERRUPT_STEERING);
- for_each_engine(engine, dev_priv, id)
+ for_each_engine(engine, gt->i915, id)
ENGINE_WRITE(engine, RING_MODE_GEN7, irqs);
/* route all GT interrupts to the host */
- I915_WRITE(GUC_BCS_RCS_IER, 0);
- I915_WRITE(GUC_VCS2_VCS1_IER, 0);
- I915_WRITE(GUC_WD_VECS_IER, 0);
+ intel_uncore_write(uncore, GUC_BCS_RCS_IER, 0);
+ intel_uncore_write(uncore, GUC_VCS2_VCS1_IER, 0);
+ intel_uncore_write(uncore, GUC_WD_VECS_IER, 0);
rps->pm_intrmsk_mbz |= GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC;
rps->pm_intrmsk_mbz &= ~ARAT_EXPIRED_INTRMSK;
}
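
Both interrupt routines program i915's "masked" registers, where the top 16 bits of a write select which of the bottom 16 bits take effect, so a plain write can flip individual bits without a read-modify-write cycle. The helper macros are approximately the following (after i915_reg.h; exact bodies hedged):

/* Editor's sketch of i915's masked-register helpers. */
#define MASKED_FIELD(mask, value)   (((mask) << 16) | (value))
#define MASKED_BIT_ENABLE(a)        MASKED_FIELD((a), (a))
#define MASKED_BIT_DISABLE(a)       MASKED_FIELD((a), 0)

/* Writing MASKED_BIT_ENABLE(GFX_INTERRUPT_STEERING) touches only that
 * bit; every other bit's mask half is 0, so the hardware ignores it. */
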
-static void guc_submission_park(struct intel_engine_cs *engine)
-{
- intel_engine_park(engine);
- intel_engine_unpin_breadcrumbs_irq(engine);
- engine->flags &= ~I915_ENGINE_NEEDS_BREADCRUMB_TASKLET;
-}
-
-static void guc_submission_unpark(struct intel_engine_cs *engine)
-{
- engine->flags |= I915_ENGINE_NEEDS_BREADCRUMB_TASKLET;
- intel_engine_pin_breadcrumbs_irq(engine);
-}
-
static void guc_set_default_submission(struct intel_engine_cs *engine)
{
/*
@@ -1394,8 +1091,8 @@ static void guc_set_default_submission(struct intel_engine_cs *engine)
engine->execlists.tasklet.func = guc_submission_tasklet;
- engine->park = guc_submission_park;
- engine->unpark = guc_submission_unpark;
+ /* do not use execlists park/unpark */
+ engine->park = engine->unpark = NULL;
engine->reset.prepare = guc_reset_prepare;
engine->reset.reset = guc_reset;
@@ -1404,15 +1101,28 @@ static void guc_set_default_submission(struct intel_engine_cs *engine)
engine->cancel_requests = guc_cancel_requests;
engine->flags &= ~I915_ENGINE_SUPPORTS_STATS;
+ engine->flags |= I915_ENGINE_NEEDS_BREADCRUMB_TASKLET;
+
+ /*
+ * For the breadcrumb irq to work we need the interrupts to stay
+ * enabled. However, on all platforms on which we'll have support for
+ * GuC submission we don't allow disabling the interrupts at runtime, so
+ * we're always safe with the current flow.
+ */
+ GEM_BUG_ON(engine->irq_enable || engine->irq_disable);
}
int intel_guc_submission_enable(struct intel_guc *guc)
{
- struct drm_i915_private *dev_priv = guc_to_i915(guc);
+ struct intel_gt *gt = guc_to_gt(guc);
struct intel_engine_cs *engine;
enum intel_engine_id id;
int err;
+ err = i915_inject_load_error(gt->i915, -ENXIO);
+ if (err)
+ return err;
+
/*
* We're using GuC work items for submitting work through GuC. Since
* we're coalescing multiple requests from a single context into a
@@ -1422,7 +1132,7 @@ int intel_guc_submission_enable(struct intel_guc *guc)
* and it is guaranteed that it will remove the work item from the
* queue before our request is completed.
*/
- BUILD_BUG_ON(ARRAY_SIZE(engine->execlists.port) *
+ BUILD_BUG_ON(ARRAY_SIZE(engine->execlists.inflight) *
sizeof(struct guc_wq_item) *
I915_NUM_ENGINES > GUC_WQ_SIZE);
@@ -1433,9 +1143,9 @@ int intel_guc_submission_enable(struct intel_guc *guc)
return err;
/* Take over from manual control of ELSP (execlists) */
- guc_interrupts_capture(dev_priv);
+ guc_interrupts_capture(gt);
- for_each_engine(engine, dev_priv, id) {
+ for_each_engine(engine, gt->i915, id) {
engine->set_default_submission = guc_set_default_submission;
engine->set_default_submission(engine);
}
@@ -1445,14 +1155,30 @@ int intel_guc_submission_enable(struct intel_guc *guc)
void intel_guc_submission_disable(struct intel_guc *guc)
{
- struct drm_i915_private *dev_priv = guc_to_i915(guc);
+ struct intel_gt *gt = guc_to_gt(guc);
- GEM_BUG_ON(dev_priv->gt.awake); /* GT should be parked first */
+ GEM_BUG_ON(gt->awake); /* GT should be parked first */
- guc_interrupts_release(dev_priv);
+ guc_interrupts_release(gt);
guc_clients_disable(guc);
}
+static bool __guc_submission_support(struct intel_guc *guc)
+{
+ /* XXX: GuC submission is unavailable for now */
+ return false;
+
+ if (!intel_guc_is_supported(guc))
+ return false;
+
+ return i915_modparams.enable_guc & ENABLE_GUC_SUBMISSION;
+}
+
+void intel_guc_submission_init_early(struct intel_guc *guc)
+{
+ guc->submission_supported = __guc_submission_support(guc);
+}
+
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
-#include "selftests/intel_guc.c"
+#include "selftest_guc.c"
#endif
diff --git a/drivers/gpu/drm/i915/intel_guc_submission.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h
index 7d823a513b9c..54d716828352 100644
--- a/drivers/gpu/drm/i915/intel_guc_submission.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h
@@ -1,25 +1,6 @@
+/* SPDX-License-Identifier: MIT */
/*
- * Copyright © 2014-2017 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
+ * Copyright © 2014-2019 Intel Corporation
*/
#ifndef _INTEL_GUC_SUBMISSION_H_
@@ -58,11 +39,9 @@ struct drm_i915_private;
struct intel_guc_client {
struct i915_vma *vma;
void *vaddr;
- struct i915_gem_context *owner;
struct intel_guc *guc;
/* bitmap of (host) engine ids */
- u32 engines;
u32 priority;
u32 stage_id;
u32 proc_desc_offset;
@@ -72,13 +51,12 @@ struct intel_guc_client {
/* Protects GuC client's WQ access */
spinlock_t wq_lock;
- /* Per-engine counts of GuC submissions */
- u64 submissions[I915_NUM_ENGINES];
/* For testing purposes, use nop WQ items instead of real ones */
I915_SELFTEST_DECLARE(bool use_nop_wqi);
};
+void intel_guc_submission_init_early(struct intel_guc *guc);
int intel_guc_submission_init(struct intel_guc *guc);
int intel_guc_submission_enable(struct intel_guc *guc);
void intel_guc_submission_disable(struct intel_guc *guc);
diff --git a/drivers/gpu/drm/i915/intel_huc.c b/drivers/gpu/drm/i915/gt/uc/intel_huc.c
index fb6f693d3cac..d4625c97b4f9 100644
--- a/drivers/gpu/drm/i915/intel_huc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_huc.c
@@ -1,35 +1,17 @@
+// SPDX-License-Identifier: MIT
/*
- * Copyright © 2016-2017 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
+ * Copyright © 2016-2019 Intel Corporation
*/
#include <linux/types.h>
+#include "gt/intel_gt.h"
#include "intel_huc.h"
#include "i915_drv.h"
void intel_huc_init_early(struct intel_huc *huc)
{
- struct drm_i915_private *i915 = huc_to_i915(huc);
+ struct drm_i915_private *i915 = huc_to_gt(huc)->i915;
intel_huc_fw_init_early(huc);
@@ -44,20 +26,18 @@ void intel_huc_init_early(struct intel_huc *huc)
}
}
-int intel_huc_init_misc(struct intel_huc *huc)
-{
- struct drm_i915_private *i915 = huc_to_i915(huc);
-
- intel_uc_fw_fetch(i915, &huc->fw);
- return 0;
-}
-
static int intel_huc_rsa_data_create(struct intel_huc *huc)
{
- struct drm_i915_private *i915 = huc_to_i915(huc);
- struct intel_guc *guc = &i915->guc;
+ struct intel_gt *gt = huc_to_gt(huc);
+ struct intel_guc *guc = &gt->uc.guc;
struct i915_vma *vma;
+ size_t copied;
void *vaddr;
+ int err;
+
+ err = i915_inject_load_error(gt->i915, -ENXIO);
+ if (err)
+ return err;
/*
* HuC firmware will sit above GUC_GGTT_TOP and will not map
@@ -69,6 +49,7 @@ static int intel_huc_rsa_data_create(struct intel_huc *huc)
* the authentication since its GGTT offset will be GuC
* accessible.
*/
+ GEM_BUG_ON(huc->fw.rsa_size > PAGE_SIZE);
vma = intel_guc_allocate_vma(guc, PAGE_SIZE);
if (IS_ERR(vma))
return PTR_ERR(vma);
@@ -79,32 +60,56 @@ static int intel_huc_rsa_data_create(struct intel_huc *huc)
return PTR_ERR(vaddr);
}
+ copied = intel_uc_fw_copy_rsa(&huc->fw, vaddr, vma->size);
+ GEM_BUG_ON(copied < huc->fw.rsa_size);
+
+ i915_gem_object_unpin_map(vma->obj);
+
huc->rsa_data = vma;
- huc->rsa_data_vaddr = vaddr;
return 0;
}
static void intel_huc_rsa_data_destroy(struct intel_huc *huc)
{
- i915_vma_unpin_and_release(&huc->rsa_data, I915_VMA_RELEASE_MAP);
+ i915_vma_unpin_and_release(&huc->rsa_data, 0);
}
int intel_huc_init(struct intel_huc *huc)
{
+ struct drm_i915_private *i915 = huc_to_gt(huc)->i915;
int err;
+ err = intel_uc_fw_init(&huc->fw);
+ if (err)
+ goto out;
+
+ /*
+ * HuC firmware image is outside GuC accessible range.
+ * Copy the RSA signature out of the image into
+ * a perma-pinned region set aside for it
+ */
err = intel_huc_rsa_data_create(huc);
if (err)
- return err;
+ goto out_fini;
+
+ return 0;
- return intel_uc_fw_init(&huc->fw);
+out_fini:
+ intel_uc_fw_fini(&huc->fw);
+out:
+ intel_uc_fw_cleanup_fetch(&huc->fw);
+ DRM_DEV_DEBUG_DRIVER(i915->drm.dev, "failed with %d\n", err);
+ return err;
}
void intel_huc_fini(struct intel_huc *huc)
{
- intel_uc_fw_fini(&huc->fw);
+ if (!intel_uc_fw_is_available(&huc->fw))
+ return;
+
intel_huc_rsa_data_destroy(huc);
+ intel_uc_fw_fini(&huc->fw);
}
/**
@@ -120,13 +125,19 @@ void intel_huc_fini(struct intel_huc *huc)
*/
int intel_huc_auth(struct intel_huc *huc)
{
- struct drm_i915_private *i915 = huc_to_i915(huc);
- struct intel_guc *guc = &i915->guc;
+ struct intel_gt *gt = huc_to_gt(huc);
+ struct intel_guc *guc = &gt->uc.guc;
int ret;
- if (huc->fw.load_status != INTEL_UC_FIRMWARE_SUCCESS)
+ GEM_BUG_ON(intel_huc_is_authenticated(huc));
+
+ if (!intel_uc_fw_is_loaded(&huc->fw))
return -ENOEXEC;
+ ret = i915_inject_load_error(gt->i915, -ENXIO);
+ if (ret)
+ goto fail;
+
ret = intel_guc_auth_huc(guc,
intel_guc_ggtt_offset(guc, huc->rsa_data));
if (ret) {
@@ -135,7 +146,7 @@ int intel_huc_auth(struct intel_huc *huc)
}
/* Check authentication status, it should be done by now */
- ret = __intel_wait_for_register(&i915->uncore,
+ ret = __intel_wait_for_register(gt->uncore,
huc->status.reg,
huc->status.mask,
huc->status.value,
@@ -145,12 +156,12 @@ int intel_huc_auth(struct intel_huc *huc)
goto fail;
}
+ intel_uc_fw_change_status(&huc->fw, INTEL_UC_FIRMWARE_RUNNING);
return 0;
fail:
- huc->fw.load_status = INTEL_UC_FIRMWARE_FAIL;
-
- DRM_ERROR("HuC: Authentication failed %d\n", ret);
+ i915_probe_error(gt->i915, "HuC: Authentication failed %d\n", ret);
+ intel_uc_fw_change_status(&huc->fw, INTEL_UC_FIRMWARE_FAIL);
return ret;
}
@@ -167,16 +178,15 @@ fail:
*/
int intel_huc_check_status(struct intel_huc *huc)
{
- struct drm_i915_private *dev_priv = huc_to_i915(huc);
+ struct intel_gt *gt = huc_to_gt(huc);
intel_wakeref_t wakeref;
- bool status = false;
+ u32 status = 0;
- if (!HAS_HUC(dev_priv))
+ if (!intel_huc_is_supported(huc))
return -ENODEV;
- with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref)
- status = (I915_READ(huc->status.reg) & huc->status.mask) ==
- huc->status.value;
+ with_intel_runtime_pm(&gt->i915->runtime_pm, wakeref)
+ status = intel_uncore_read(gt->uncore, huc->status.reg);
- return status;
+ return (status & huc->status.mask) == huc->status.value;
}
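
The reworked intel_huc_check_status() reads the raw register once under the runtime-PM wakeref and applies the mask/value comparison afterwards. The comparison itself is the general field-match pattern, which handles multi-bit fields that a simple bit test cannot:

#include <stdbool.h>
#include <stdint.h>

/* Generic "is this register field in the expected state?" check. */
static bool reg_field_matches(uint32_t reg_val, uint32_t mask, uint32_t value)
{
	return (reg_val & mask) == value;
}

/* e.g. with mask 0xFF00 and value 0xF000 this matches only when the full
 * 8-bit field equals 0xF0, which a single-bit test could not express. */
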
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc.h b/drivers/gpu/drm/i915/gt/uc/intel_huc.h
new file mode 100644
index 000000000000..644c059fe01d
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/uc/intel_huc.h
@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2014-2019 Intel Corporation
+ */
+
+#ifndef _INTEL_HUC_H_
+#define _INTEL_HUC_H_
+
+#include "i915_reg.h"
+#include "intel_uc_fw.h"
+#include "intel_huc_fw.h"
+
+struct intel_huc {
+ /* Generic uC firmware management */
+ struct intel_uc_fw fw;
+
+ /* HuC-specific additions */
+ struct i915_vma *rsa_data;
+
+ struct {
+ i915_reg_t reg;
+ u32 mask;
+ u32 value;
+ } status;
+};
+
+void intel_huc_init_early(struct intel_huc *huc);
+int intel_huc_init(struct intel_huc *huc);
+void intel_huc_fini(struct intel_huc *huc);
+int intel_huc_auth(struct intel_huc *huc);
+int intel_huc_check_status(struct intel_huc *huc);
+
+static inline int intel_huc_sanitize(struct intel_huc *huc)
+{
+ intel_uc_fw_sanitize(&huc->fw);
+ return 0;
+}
+
+static inline bool intel_huc_is_supported(struct intel_huc *huc)
+{
+ return intel_uc_fw_is_supported(&huc->fw);
+}
+
+static inline bool intel_huc_is_enabled(struct intel_huc *huc)
+{
+ return intel_uc_fw_is_enabled(&huc->fw);
+}
+
+static inline bool intel_huc_is_authenticated(struct intel_huc *huc)
+{
+ return intel_uc_fw_is_running(&huc->fw);
+}
+
+#endif
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c
new file mode 100644
index 000000000000..74602487ed67
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c
@@ -0,0 +1,58 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2014-2019 Intel Corporation
+ */
+
+#include "gt/intel_gt.h"
+#include "intel_huc_fw.h"
+#include "i915_drv.h"
+
+/**
+ * DOC: HuC Firmware
+ *
+ * Motivation:
+ * GEN9 introduces a new dedicated firmware for use in media HEVC (High
+ * Efficiency Video Coding) operations. Userspace can use the firmware
+ * capabilities by adding HuC specific commands to batch buffers.
+ *
+ * Implementation:
+ * The same firmware loader is used as for the GuC. However, the actual
+ * loading to HW is deferred until GEM initialization is done.
+ *
+ * Note that HuC firmware loading must be done before GuC loading.
+ */
+
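+/*
+ * Illustrative sketch, not part of this patch: userspace can query the
+ * result of intel_huc_check_status() through the I915_PARAM_HUC_STATUS
+ * getparam (this sketch assumes libdrm's drmIoctl() and the i915 uapi
+ * headers):
+ *
+ *	int status = 0;
+ *	struct drm_i915_getparam gp = {
+ *		.param = I915_PARAM_HUC_STATUS,
+ *		.value = &status,
+ *	};
+ *	int ret = drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp);
+ *
+ * On success, a non-zero status means the HuC is loaded and authenticated,
+ * so HuC-specific commands may be emitted in batch buffers.
+ */
+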
+/**
+ * intel_huc_fw_init_early() - initializes HuC firmware struct
+ * @huc: intel_huc struct
+ *
+ * On platforms with HuC, this selects the firmware to upload.
+ */
+void intel_huc_fw_init_early(struct intel_huc *huc)
+{
+ struct intel_gt *gt = huc_to_gt(huc);
+ struct intel_uc *uc = &gt->uc;
+ struct drm_i915_private *i915 = gt->i915;
+
+ intel_uc_fw_init_early(&huc->fw, INTEL_UC_FW_TYPE_HUC,
+ intel_uc_uses_guc(uc),
+ INTEL_INFO(i915)->platform, INTEL_REVID(i915));
+}
+
+/**
+ * intel_huc_fw_upload() - load HuC uCode to device
+ * @huc: intel_huc structure
+ *
+ * Called from intel_uc_init_hw() during driver load, resume from sleep and
+ * after a GPU reset. Note that HuC must be loaded before GuC.
+ *
+ * The firmware image should have already been fetched into memory, so only
+ * check that fetch succeeded, and then transfer the image to the h/w.
+ *
+ * Return: non-zero code on error
+ */
+int intel_huc_fw_upload(struct intel_huc *huc)
+{
+ /* HW doesn't look at destination address for HuC, so set it to 0 */
+ return intel_uc_fw_upload(&huc->fw, huc_to_gt(huc), 0, HUC_UKERNEL);
+}
diff --git a/drivers/gpu/drm/i915/intel_huc_fw.h b/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.h
index 8a00a0ebddc5..b791269ce923 100644
--- a/drivers/gpu/drm/i915/intel_huc_fw.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.h
@@ -1,7 +1,6 @@
+/* SPDX-License-Identifier: MIT */
/*
- * SPDX-License-Identifier: MIT
- *
- * Copyright © 2014-2018 Intel Corporation
+ * Copyright © 2014-2019 Intel Corporation
*/
#ifndef _INTEL_HUC_FW_H_
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc.c b/drivers/gpu/drm/i915/gt/uc/intel_uc.c
new file mode 100644
index 000000000000..71ee7ab035cc
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc.c
@@ -0,0 +1,627 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2016-2019 Intel Corporation
+ */
+
+#include "gt/intel_gt.h"
+#include "gt/intel_reset.h"
+#include "intel_guc.h"
+#include "intel_guc_ads.h"
+#include "intel_guc_submission.h"
+#include "intel_uc.h"
+
+#include "i915_drv.h"
+
+/* Reset GuC, providing us with fresh state for both GuC and HuC. */
+static int __intel_uc_reset_hw(struct intel_uc *uc)
+{
+ struct intel_gt *gt = uc_to_gt(uc);
+ int ret;
+ u32 guc_status;
+
+ ret = i915_inject_load_error(gt->i915, -ENXIO);
+ if (ret)
+ return ret;
+
+ ret = intel_reset_guc(gt);
+ if (ret) {
+ DRM_ERROR("Failed to reset GuC, ret = %d\n", ret);
+ return ret;
+ }
+
+ guc_status = intel_uncore_read(gt->uncore, GUC_STATUS);
+ WARN(!(guc_status & GS_MIA_IN_RESET),
+ "GuC status: 0x%x, MIA core expected to be in reset\n",
+ guc_status);
+
+ return ret;
+}
+
+static void __confirm_options(struct intel_uc *uc)
+{
+ struct drm_i915_private *i915 = uc_to_gt(uc)->i915;
+
+ DRM_DEV_DEBUG_DRIVER(i915->drm.dev,
+ "enable_guc=%d (guc:%s submission:%s huc:%s)\n",
+ i915_modparams.enable_guc,
+ yesno(intel_uc_uses_guc(uc)),
+ yesno(intel_uc_uses_guc_submission(uc)),
+ yesno(intel_uc_uses_huc(uc)));
+
+ if (i915_modparams.enable_guc == -1)
+ return;
+
+ if (i915_modparams.enable_guc == 0) {
+ GEM_BUG_ON(intel_uc_uses_guc(uc));
+ GEM_BUG_ON(intel_uc_uses_guc_submission(uc));
+ GEM_BUG_ON(intel_uc_uses_huc(uc));
+ return;
+ }
+
+ if (!intel_uc_supports_guc(uc))
+ dev_info(i915->drm.dev,
+ "Incompatible option enable_guc=%d - %s\n",
+ i915_modparams.enable_guc, "GuC is not supported!");
+
+ if (i915_modparams.enable_guc & ENABLE_GUC_LOAD_HUC &&
+ !intel_uc_supports_huc(uc))
+ dev_info(i915->drm.dev,
+ "Incompatible option enable_guc=%d - %s\n",
+ i915_modparams.enable_guc, "HuC is not supported!");
+
+ if (i915_modparams.enable_guc & ENABLE_GUC_SUBMISSION &&
+ !intel_uc_supports_guc_submission(uc))
+ dev_info(i915->drm.dev,
+ "Incompatible option enable_guc=%d - %s\n",
+ i915_modparams.enable_guc, "GuC submission is N/A");
+
+ if (i915_modparams.enable_guc & ~(ENABLE_GUC_SUBMISSION |
+ ENABLE_GUC_LOAD_HUC))
+ dev_info(i915->drm.dev,
+ "Incompatible option enable_guc=%d - %s\n",
+ i915_modparams.enable_guc, "undocumented flag");
+}
+
+void intel_uc_init_early(struct intel_uc *uc)
+{
+ intel_guc_init_early(&uc->guc);
+ intel_huc_init_early(&uc->huc);
+
+ __confirm_options(uc);
+}
+
+void intel_uc_driver_late_release(struct intel_uc *uc)
+{
+}
+
+/**
+ * intel_uc_init_mmio - setup uC MMIO access
+ * @uc: the intel_uc structure
+ *
+ * Setup minimal state necessary for MMIO accesses later in the
+ * initialization sequence.
+ */
+void intel_uc_init_mmio(struct intel_uc *uc)
+{
+ intel_guc_init_send_regs(&uc->guc);
+}
+
+static void __uc_capture_load_err_log(struct intel_uc *uc)
+{
+ struct intel_guc *guc = &uc->guc;
+
+ if (guc->log.vma && !uc->load_err_log)
+ uc->load_err_log = i915_gem_object_get(guc->log.vma->obj);
+}
+
+static void __uc_free_load_err_log(struct intel_uc *uc)
+{
+ struct drm_i915_gem_object *log = fetch_and_zero(&uc->load_err_log);
+
+ if (log)
+ i915_gem_object_put(log);
+}
+
+/*
+ * Events triggered while CT buffers are disabled are logged in the SCRATCH_15
+ * register using the same bits used in the CT message payload. Since our
+ * communication channel with GuC is turned off at this point, we can save the
+ * message and handle it after we turn it back on.
+ */
+static void guc_clear_mmio_msg(struct intel_guc *guc)
+{
+ intel_uncore_write(guc_to_gt(guc)->uncore, SOFT_SCRATCH(15), 0);
+}
+
+static void guc_get_mmio_msg(struct intel_guc *guc)
+{
+ u32 val;
+
+ spin_lock_irq(&guc->irq_lock);
+
+ val = intel_uncore_read(guc_to_gt(guc)->uncore, SOFT_SCRATCH(15));
+ guc->mmio_msg |= val & guc->msg_enabled_mask;
+
+ /*
+ * clear all events, including the ones we're not currently servicing,
+ * to make sure we don't try to process a stale message if we enable
+ * handling of more events later.
+ */
+ guc_clear_mmio_msg(guc);
+
+ spin_unlock_irq(&guc->irq_lock);
+}
+
+static void guc_handle_mmio_msg(struct intel_guc *guc)
+{
+ struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
+
+ /* we need communication to be enabled to reply to GuC */
+ GEM_BUG_ON(guc->handler == intel_guc_to_host_event_handler_nop);
+
+ if (!guc->mmio_msg)
+ return;
+
+ spin_lock_irq(&i915->irq_lock);
+ intel_guc_to_host_process_recv_msg(guc, &guc->mmio_msg, 1);
+ spin_unlock_irq(&i915->irq_lock);
+
+ guc->mmio_msg = 0;
+}
+
+static void guc_reset_interrupts(struct intel_guc *guc)
+{
+ guc->interrupts.reset(guc);
+}
+
+static void guc_enable_interrupts(struct intel_guc *guc)
+{
+ guc->interrupts.enable(guc);
+}
+
+static void guc_disable_interrupts(struct intel_guc *guc)
+{
+ guc->interrupts.disable(guc);
+}
+
+static inline bool guc_communication_enabled(struct intel_guc *guc)
+{
+ return guc->send != intel_guc_send_nop;
+}
+
+static int guc_enable_communication(struct intel_guc *guc)
+{
+ struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
+ int ret;
+
+ GEM_BUG_ON(guc_communication_enabled(guc));
+
+ ret = i915_inject_load_error(i915, -ENXIO);
+ if (ret)
+ return ret;
+
+ ret = intel_guc_ct_enable(&guc->ct);
+ if (ret)
+ return ret;
+
+ guc->send = intel_guc_send_ct;
+ guc->handler = intel_guc_to_host_event_handler_ct;
+
+ /* check for mmio messages received before/during the CT enable */
+ guc_get_mmio_msg(guc);
+ guc_handle_mmio_msg(guc);
+
+ guc_enable_interrupts(guc);
+
+ /* check for CT messages received before we enabled interrupts */
+ spin_lock_irq(&i915->irq_lock);
+ intel_guc_to_host_event_handler_ct(guc);
+ spin_unlock_irq(&i915->irq_lock);
+
+ DRM_INFO("GuC communication enabled\n");
+
+ return 0;
+}
+
+static void guc_stop_communication(struct intel_guc *guc)
+{
+ intel_guc_ct_stop(&guc->ct);
+
+ guc->send = intel_guc_send_nop;
+ guc->handler = intel_guc_to_host_event_handler_nop;
+
+ guc_clear_mmio_msg(guc);
+}
+
+static void guc_disable_communication(struct intel_guc *guc)
+{
+ /*
+ * Events generated during or after CT disable are logged by GuC
+ * via mmio. Make sure the register is clear before disabling CT since
+ * all events we cared about have already been processed via CT.
+ */
+ guc_clear_mmio_msg(guc);
+
+ guc_disable_interrupts(guc);
+
+ guc->send = intel_guc_send_nop;
+ guc->handler = intel_guc_to_host_event_handler_nop;
+
+ intel_guc_ct_disable(&guc->ct);
+
+ /*
+ * Check for messages received during/after the CT disable. We do not
+ * expect any messages to have arrived via CT between the interrupt
+ * disable and the CT disable because GuC should've been idle until we
+ * triggered the CT disable protocol.
+ */
+ guc_get_mmio_msg(guc);
+
+ DRM_INFO("GuC communication disabled\n");
+}
+
+void intel_uc_fetch_firmwares(struct intel_uc *uc)
+{
+ struct drm_i915_private *i915 = uc_to_gt(uc)->i915;
+ int err;
+
+ if (!intel_uc_uses_guc(uc))
+ return;
+
+ err = intel_uc_fw_fetch(&uc->guc.fw, i915);
+ if (err)
+ return;
+
+ if (intel_uc_uses_huc(uc))
+ intel_uc_fw_fetch(&uc->huc.fw, i915);
+}
+
+void intel_uc_cleanup_firmwares(struct intel_uc *uc)
+{
+ if (!intel_uc_uses_guc(uc))
+ return;
+
+ if (intel_uc_uses_huc(uc))
+ intel_uc_fw_cleanup_fetch(&uc->huc.fw);
+
+ intel_uc_fw_cleanup_fetch(&uc->guc.fw);
+}
+
+void intel_uc_init(struct intel_uc *uc)
+{
+ struct intel_guc *guc = &uc->guc;
+ struct intel_huc *huc = &uc->huc;
+ int ret;
+
+ if (!intel_uc_uses_guc(uc))
+ return;
+
+ /* XXX: GuC submission is unavailable for now */
+ GEM_BUG_ON(intel_uc_supports_guc_submission(uc));
+
+ ret = intel_guc_init(guc);
+ if (ret) {
+ intel_uc_fw_cleanup_fetch(&huc->fw);
+ return;
+ }
+
+ if (intel_uc_uses_huc(uc))
+ intel_huc_init(huc);
+}
+
+void intel_uc_fini(struct intel_uc *uc)
+{
+ struct intel_guc *guc = &uc->guc;
+
+ if (!intel_uc_uses_guc(uc))
+ return;
+
+ if (intel_uc_uses_huc(uc))
+ intel_huc_fini(&uc->huc);
+
+ intel_guc_fini(guc);
+
+ __uc_free_load_err_log(uc);
+}
+
+static int __uc_sanitize(struct intel_uc *uc)
+{
+ struct intel_guc *guc = &uc->guc;
+ struct intel_huc *huc = &uc->huc;
+
+ GEM_BUG_ON(!intel_uc_supports_guc(uc));
+
+ intel_huc_sanitize(huc);
+ intel_guc_sanitize(guc);
+
+ return __intel_uc_reset_hw(uc);
+}
+
+void intel_uc_sanitize(struct intel_uc *uc)
+{
+ if (!intel_uc_supports_guc(uc))
+ return;
+
+ __uc_sanitize(uc);
+}
+
+/* Initialize and verify the uC regs related to uC positioning in WOPCM */
+static int uc_init_wopcm(struct intel_uc *uc)
+{
+ struct intel_gt *gt = uc_to_gt(uc);
+ struct intel_uncore *uncore = gt->uncore;
+ u32 base = intel_wopcm_guc_base(&gt->i915->wopcm);
+ u32 size = intel_wopcm_guc_size(&gt->i915->wopcm);
+ u32 huc_agent = intel_uc_uses_huc(uc) ? HUC_LOADING_AGENT_GUC : 0;
+ u32 mask;
+ int err;
+
+ if (unlikely(!base || !size)) {
+ i915_probe_error(gt->i915, "Unsuccessful WOPCM partitioning\n");
+ return -E2BIG;
+ }
+
+ GEM_BUG_ON(!intel_uc_supports_guc(uc));
+ GEM_BUG_ON(!(base & GUC_WOPCM_OFFSET_MASK));
+ GEM_BUG_ON(base & ~GUC_WOPCM_OFFSET_MASK);
+ GEM_BUG_ON(!(size & GUC_WOPCM_SIZE_MASK));
+ GEM_BUG_ON(size & ~GUC_WOPCM_SIZE_MASK);
+
+ err = i915_inject_load_error(gt->i915, -ENXIO);
+ if (err)
+ return err;
+
+ mask = GUC_WOPCM_SIZE_MASK | GUC_WOPCM_SIZE_LOCKED;
+ err = intel_uncore_write_and_verify(uncore, GUC_WOPCM_SIZE, size, mask,
+ size | GUC_WOPCM_SIZE_LOCKED);
+ if (err)
+ goto err_out;
+
+ mask = GUC_WOPCM_OFFSET_MASK | GUC_WOPCM_OFFSET_VALID | huc_agent;
+ err = intel_uncore_write_and_verify(uncore, DMA_GUC_WOPCM_OFFSET,
+ base | huc_agent, mask,
+ base | huc_agent |
+ GUC_WOPCM_OFFSET_VALID);
+ if (err)
+ goto err_out;
+
+ return 0;
+
+err_out:
+ i915_probe_error(gt->i915, "Failed to init uC WOPCM registers!\n");
+ i915_probe_error(gt->i915, "%s(%#x)=%#x\n", "DMA_GUC_WOPCM_OFFSET",
+ i915_mmio_reg_offset(DMA_GUC_WOPCM_OFFSET),
+ intel_uncore_read(uncore, DMA_GUC_WOPCM_OFFSET));
+ i915_probe_error(gt->i915, "%s(%#x)=%#x\n", "GUC_WOPCM_SIZE",
+ i915_mmio_reg_offset(GUC_WOPCM_SIZE),
+ intel_uncore_read(uncore, GUC_WOPCM_SIZE));
+
+ return err;
+}
+
+static bool uc_is_wopcm_locked(struct intel_uc *uc)
+{
+ struct intel_gt *gt = uc_to_gt(uc);
+ struct intel_uncore *uncore = gt->uncore;
+
+ return (intel_uncore_read(uncore, GUC_WOPCM_SIZE) & GUC_WOPCM_SIZE_LOCKED) ||
+ (intel_uncore_read(uncore, DMA_GUC_WOPCM_OFFSET) & GUC_WOPCM_OFFSET_VALID);
+}
+
+int intel_uc_init_hw(struct intel_uc *uc)
+{
+ struct drm_i915_private *i915 = uc_to_gt(uc)->i915;
+ struct intel_guc *guc = &uc->guc;
+ struct intel_huc *huc = &uc->huc;
+ int ret, attempts;
+
+ if (!intel_uc_supports_guc(uc))
+ return 0;
+
+ /*
+ * We can silently continue without GuC only if it was never enabled
+ * on this system since boot, otherwise we risk GPU hangs.
+ * To check whether GuC was loaded before, we look at the WOPCM registers.
+ */
+ if (!intel_uc_uses_guc(uc) && !uc_is_wopcm_locked(uc))
+ return 0;
+
+ if (!intel_uc_fw_is_available(&guc->fw)) {
+ ret = uc_is_wopcm_locked(uc) ||
+ intel_uc_fw_is_overridden(&guc->fw) ||
+ intel_uc_supports_guc_submission(uc) ?
+ intel_uc_fw_status_to_error(guc->fw.status) : 0;
+ goto err_out;
+ }
+
+ ret = uc_init_wopcm(uc);
+ if (ret)
+ goto err_out;
+
+ guc_reset_interrupts(guc);
+
+ /* WaEnableuKernelHeaderValidFix:skl */
+ /* WaEnableGuCBootHashCheckNotSet:skl,bxt,kbl */
+ if (IS_GEN(i915, 9))
+ attempts = 3;
+ else
+ attempts = 1;
+
+ while (attempts--) {
+ /*
+ * Always reset the GuC just before (re)loading, so
+ * that the state and timing are fairly predictable
+ */
+ ret = __uc_sanitize(uc);
+ if (ret)
+ goto err_out;
+
+ intel_huc_fw_upload(huc);
+ intel_guc_ads_reset(guc);
+ intel_guc_write_params(guc);
+ ret = intel_guc_fw_upload(guc);
+ if (ret == 0)
+ break;
+
+ DRM_DEBUG_DRIVER("GuC fw load failed: %d; will reset and "
+ "retry %d more time(s)\n", ret, attempts);
+ }
+
+ /* Did we succeed or run out of retries? */
+ if (ret)
+ goto err_log_capture;
+
+ ret = guc_enable_communication(guc);
+ if (ret)
+ goto err_log_capture;
+
+ intel_huc_auth(huc);
+
+ ret = intel_guc_sample_forcewake(guc);
+ if (ret)
+ goto err_communication;
+
+ if (intel_uc_supports_guc_submission(uc)) {
+ ret = intel_guc_submission_enable(guc);
+ if (ret)
+ goto err_communication;
+ }
+
+ dev_info(i915->drm.dev, "%s firmware %s version %u.%u %s:%s\n",
+ intel_uc_fw_type_repr(INTEL_UC_FW_TYPE_GUC), guc->fw.path,
+ guc->fw.major_ver_found, guc->fw.minor_ver_found,
+ "submission",
+ enableddisabled(intel_uc_supports_guc_submission(uc)));
+
+ if (intel_uc_uses_huc(uc)) {
+ dev_info(i915->drm.dev, "%s firmware %s version %u.%u %s:%s\n",
+ intel_uc_fw_type_repr(INTEL_UC_FW_TYPE_HUC),
+ huc->fw.path,
+ huc->fw.major_ver_found, huc->fw.minor_ver_found,
+ "authenticated",
+ yesno(intel_huc_is_authenticated(huc)));
+ }
+
+ return 0;
+
+ /*
+ * We've failed to load the firmware :(
+ */
+err_communication:
+ guc_disable_communication(guc);
+err_log_capture:
+ __uc_capture_load_err_log(uc);
+err_out:
+ __uc_sanitize(uc);
+
+ if (!ret) {
+ dev_notice(i915->drm.dev, "GuC is uninitialized\n");
+ /* We want to run without GuC submission */
+ return 0;
+ }
+
+ i915_probe_error(i915, "GuC initialization failed %d\n", ret);
+
+ /* We want to keep KMS alive */
+ return -EIO;
+}
+
+void intel_uc_fini_hw(struct intel_uc *uc)
+{
+ struct intel_guc *guc = &uc->guc;
+
+ if (!intel_guc_is_running(guc))
+ return;
+
+ if (intel_uc_supports_guc_submission(uc))
+ intel_guc_submission_disable(guc);
+
+ guc_disable_communication(guc);
+ __uc_sanitize(uc);
+}
+
+/**
+ * intel_uc_reset_prepare - Prepare for reset
+ * @uc: the intel_uc structure
+ *
+ * Prepare for a full GPU reset.
+ */
+void intel_uc_reset_prepare(struct intel_uc *uc)
+{
+ struct intel_guc *guc = &uc->guc;
+
+ if (!intel_guc_is_running(guc))
+ return;
+
+ guc_stop_communication(guc);
+ __uc_sanitize(uc);
+}
+
+void intel_uc_runtime_suspend(struct intel_uc *uc)
+{
+ struct intel_guc *guc = &uc->guc;
+ int err;
+
+ if (!intel_guc_is_running(guc))
+ return;
+
+ err = intel_guc_suspend(guc);
+ if (err)
+ DRM_DEBUG_DRIVER("Failed to suspend GuC, err=%d", err);
+
+ guc_disable_communication(guc);
+}
+
+void intel_uc_suspend(struct intel_uc *uc)
+{
+ struct intel_guc *guc = &uc->guc;
+ intel_wakeref_t wakeref;
+
+ if (!intel_guc_is_running(guc))
+ return;
+
+ with_intel_runtime_pm(&uc_to_gt(uc)->i915->runtime_pm, wakeref)
+ intel_uc_runtime_suspend(uc);
+}
+
+static int __uc_resume(struct intel_uc *uc, bool enable_communication)
+{
+ struct intel_guc *guc = &uc->guc;
+ int err;
+
+ if (!intel_guc_is_running(guc))
+ return 0;
+
+ /* Make sure we enable communication if and only if it's disabled */
+ GEM_BUG_ON(enable_communication == guc_communication_enabled(guc));
+
+ if (enable_communication)
+ guc_enable_communication(guc);
+
+ err = intel_guc_resume(guc);
+ if (err) {
+ DRM_DEBUG_DRIVER("Failed to resume GuC, err=%d", err);
+ return err;
+ }
+
+ return 0;
+}
+
+int intel_uc_resume(struct intel_uc *uc)
+{
+ /*
+ * When coming out of S3/S4 we sanitize and re-init the HW, so
+ * communication is already re-enabled at this point.
+ */
+ return __uc_resume(uc, false);
+}
+
+int intel_uc_runtime_resume(struct intel_uc *uc)
+{
+ /*
+ * During runtime resume we don't sanitize, so we need to re-init
+ * communication as well.
+ */
+ return __uc_resume(uc, true);
+}
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc.h b/drivers/gpu/drm/i915/gt/uc/intel_uc.h
new file mode 100644
index 000000000000..527995c21196
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc.h
@@ -0,0 +1,67 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2014-2019 Intel Corporation
+ */
+
+#ifndef _INTEL_UC_H_
+#define _INTEL_UC_H_
+
+#include "intel_guc.h"
+#include "intel_huc.h"
+#include "i915_params.h"
+
+struct intel_uc {
+ struct intel_guc guc;
+ struct intel_huc huc;
+
+ /* Snapshot of GuC log from last failed load */
+ struct drm_i915_gem_object *load_err_log;
+};
+
+void intel_uc_init_early(struct intel_uc *uc);
+void intel_uc_driver_late_release(struct intel_uc *uc);
+void intel_uc_init_mmio(struct intel_uc *uc);
+void intel_uc_fetch_firmwares(struct intel_uc *uc);
+void intel_uc_cleanup_firmwares(struct intel_uc *uc);
+void intel_uc_sanitize(struct intel_uc *uc);
+void intel_uc_init(struct intel_uc *uc);
+int intel_uc_init_hw(struct intel_uc *uc);
+void intel_uc_fini_hw(struct intel_uc *uc);
+void intel_uc_fini(struct intel_uc *uc);
+void intel_uc_reset_prepare(struct intel_uc *uc);
+void intel_uc_suspend(struct intel_uc *uc);
+void intel_uc_runtime_suspend(struct intel_uc *uc);
+int intel_uc_resume(struct intel_uc *uc);
+int intel_uc_runtime_resume(struct intel_uc *uc);
+
+static inline bool intel_uc_supports_guc(struct intel_uc *uc)
+{
+ return intel_guc_is_supported(&uc->guc);
+}
+
+static inline bool intel_uc_uses_guc(struct intel_uc *uc)
+{
+ return intel_guc_is_enabled(&uc->guc);
+}
+
+static inline bool intel_uc_supports_guc_submission(struct intel_uc *uc)
+{
+ return intel_guc_is_submission_supported(&uc->guc);
+}
+
+static inline bool intel_uc_uses_guc_submission(struct intel_uc *uc)
+{
+ return intel_guc_is_submission_supported(&uc->guc);
+}
+
+static inline bool intel_uc_supports_huc(struct intel_uc *uc)
+{
+ return intel_uc_supports_guc(uc);
+}
+
+static inline bool intel_uc_uses_huc(struct intel_uc *uc)
+{
+ return intel_huc_is_enabled(&uc->huc);
+}
+
+#endif
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
new file mode 100644
index 000000000000..bd22bf11adad
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
@@ -0,0 +1,616 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2016-2019 Intel Corporation
+ */
+
+#include <linux/bitfield.h>
+#include <linux/firmware.h>
+#include <drm/drm_print.h>
+
+#include "intel_uc_fw.h"
+#include "intel_uc_fw_abi.h"
+#include "i915_drv.h"
+
+#ifdef CONFIG_DRM_I915_DEBUG_GUC
+static inline struct intel_gt *__uc_fw_to_gt(struct intel_uc_fw *uc_fw)
+{
+ GEM_BUG_ON(uc_fw->status == INTEL_UC_FIRMWARE_UNINITIALIZED);
+ if (uc_fw->type == INTEL_UC_FW_TYPE_GUC)
+ return container_of(uc_fw, struct intel_gt, uc.guc.fw);
+
+ GEM_BUG_ON(uc_fw->type != INTEL_UC_FW_TYPE_HUC);
+ return container_of(uc_fw, struct intel_gt, uc.huc.fw);
+}
+
+void intel_uc_fw_change_status(struct intel_uc_fw *uc_fw,
+ enum intel_uc_fw_status status)
+{
+ uc_fw->__status = status;
+ DRM_DEV_DEBUG_DRIVER(__uc_fw_to_gt(uc_fw)->i915->drm.dev,
+ "%s firmware -> %s\n",
+ intel_uc_fw_type_repr(uc_fw->type),
+ status == INTEL_UC_FIRMWARE_SELECTED ?
+ uc_fw->path : intel_uc_fw_status_repr(status));
+}
+#endif
+
+/*
+ * List of required GuC and HuC binaries per-platform.
+ * Must be ordered based on platform + revid, from newer to older.
+ */
+#define INTEL_UC_FIRMWARE_DEFS(fw_def, guc_def, huc_def) \
+ fw_def(ICELAKE, 0, guc_def(icl, 33, 0, 0), huc_def(icl, 8, 4, 3238)) \
+ fw_def(COFFEELAKE, 0, guc_def(kbl, 33, 0, 0), huc_def(kbl, 02, 00, 1810)) \
+ fw_def(GEMINILAKE, 0, guc_def(glk, 33, 0, 0), huc_def(glk, 03, 01, 2893)) \
+ fw_def(KABYLAKE, 0, guc_def(kbl, 33, 0, 0), huc_def(kbl, 02, 00, 1810)) \
+ fw_def(BROXTON, 0, guc_def(bxt, 33, 0, 0), huc_def(bxt, 01, 8, 2893)) \
+ fw_def(SKYLAKE, 0, guc_def(skl, 33, 0, 0), huc_def(skl, 01, 07, 1398))
+
+#define __MAKE_UC_FW_PATH(prefix_, name_, separator_, major_, minor_, patch_) \
+ "i915/" \
+ __stringify(prefix_) name_ \
+ __stringify(major_) separator_ \
+ __stringify(minor_) separator_ \
+ __stringify(patch_) ".bin"
+
+#define MAKE_GUC_FW_PATH(prefix_, major_, minor_, patch_) \
+ __MAKE_UC_FW_PATH(prefix_, "_guc_", ".", major_, minor_, patch_)
+
+#define MAKE_HUC_FW_PATH(prefix_, major_, minor_, bld_num_) \
+ __MAKE_UC_FW_PATH(prefix_, "_huc_ver", "_", major_, minor_, bld_num_)
+
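+/*
+ * As an example of the path construction, the SKYLAKE row in
+ * INTEL_UC_FIRMWARE_DEFS() above expands to the blob paths
+ * "i915/skl_guc_33.0.0.bin" and "i915/skl_huc_ver01_07_1398.bin".
+ */
+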
+/* All blobs need to be declared via MODULE_FIRMWARE() */
+#define INTEL_UC_MODULE_FW(platform_, revid_, guc_, huc_) \
+ MODULE_FIRMWARE(guc_); \
+ MODULE_FIRMWARE(huc_);
+
+INTEL_UC_FIRMWARE_DEFS(INTEL_UC_MODULE_FW, MAKE_GUC_FW_PATH, MAKE_HUC_FW_PATH)
+
+/* The below structs and macros are used to iterate across the list of blobs */
+struct __packed uc_fw_blob {
+ u8 major;
+ u8 minor;
+ const char *path;
+};
+
+#define UC_FW_BLOB(major_, minor_, path_) \
+ { .major = major_, .minor = minor_, .path = path_ }
+
+#define GUC_FW_BLOB(prefix_, major_, minor_, patch_) \
+ UC_FW_BLOB(major_, minor_, \
+ MAKE_GUC_FW_PATH(prefix_, major_, minor_, patch_))
+
+#define HUC_FW_BLOB(prefix_, major_, minor_, bld_num_) \
+ UC_FW_BLOB(major_, minor_, \
+ MAKE_HUC_FW_PATH(prefix_, major_, minor_, bld_num_))
+
+struct __packed uc_fw_platform_requirement {
+ enum intel_platform p;
+ u8 rev; /* first platform rev using this FW */
+ const struct uc_fw_blob blobs[INTEL_UC_FW_NUM_TYPES];
+};
+
+#define MAKE_FW_LIST(platform_, revid_, guc_, huc_) \
+{ \
+ .p = INTEL_##platform_, \
+ .rev = revid_, \
+ .blobs[INTEL_UC_FW_TYPE_GUC] = guc_, \
+ .blobs[INTEL_UC_FW_TYPE_HUC] = huc_, \
+},
+
+static void
+__uc_fw_auto_select(struct intel_uc_fw *uc_fw, enum intel_platform p, u8 rev)
+{
+ static const struct uc_fw_platform_requirement fw_blobs[] = {
+ INTEL_UC_FIRMWARE_DEFS(MAKE_FW_LIST, GUC_FW_BLOB, HUC_FW_BLOB)
+ };
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(fw_blobs) && p <= fw_blobs[i].p; i++) {
+ if (p == fw_blobs[i].p && rev >= fw_blobs[i].rev) {
+ const struct uc_fw_blob *blob =
+ &fw_blobs[i].blobs[uc_fw->type];
+ uc_fw->path = blob->path;
+ uc_fw->major_ver_wanted = blob->major;
+ uc_fw->minor_ver_wanted = blob->minor;
+ break;
+ }
+ }
+
+ /* make sure the list is ordered as expected */
+ if (IS_ENABLED(CONFIG_DRM_I915_SELFTEST)) {
+ for (i = 1; i < ARRAY_SIZE(fw_blobs); i++) {
+ if (fw_blobs[i].p < fw_blobs[i - 1].p)
+ continue;
+
+ if (fw_blobs[i].p == fw_blobs[i - 1].p &&
+ fw_blobs[i].rev < fw_blobs[i - 1].rev)
+ continue;
+
+ pr_err("invalid FW blob order: %s r%u comes before %s r%u\n",
+ intel_platform_name(fw_blobs[i - 1].p),
+ fw_blobs[i - 1].rev,
+ intel_platform_name(fw_blobs[i].p),
+ fw_blobs[i].rev);
+
+ uc_fw->path = NULL;
+ }
+ }
+
+ /* We don't want to enable GuC/HuC on pre-Gen11 by default */
+ if (i915_modparams.enable_guc == -1 && p < INTEL_ICELAKE)
+ uc_fw->path = NULL;
+}
+
+static const char *__override_guc_firmware_path(void)
+{
+ if (i915_modparams.enable_guc & (ENABLE_GUC_SUBMISSION |
+ ENABLE_GUC_LOAD_HUC))
+ return i915_modparams.guc_firmware_path;
+ return "";
+}
+
+static const char *__override_huc_firmware_path(void)
+{
+ if (i915_modparams.enable_guc & ENABLE_GUC_LOAD_HUC)
+ return i915_modparams.huc_firmware_path;
+ return "";
+}
+
+static void __uc_fw_user_override(struct intel_uc_fw *uc_fw)
+{
+ const char *path = NULL;
+
+ switch (uc_fw->type) {
+ case INTEL_UC_FW_TYPE_GUC:
+ path = __override_guc_firmware_path();
+ break;
+ case INTEL_UC_FW_TYPE_HUC:
+ path = __override_huc_firmware_path();
+ break;
+ }
+
+ if (unlikely(path)) {
+ uc_fw->path = path;
+ uc_fw->user_overridden = true;
+ }
+}
+
+/**
+ * intel_uc_fw_init_early - initialize the uC object and select the firmware
+ * @uc_fw: uC firmware
+ * @type: type of uC
+ * @supported: is uC support possible
+ * @platform: platform identifier
+ * @rev: hardware revision
+ *
+ * Initialize the state of our uC object and relevant tracking and select the
+ * firmware to fetch and load.
+ */
+void intel_uc_fw_init_early(struct intel_uc_fw *uc_fw,
+ enum intel_uc_fw_type type, bool supported,
+ enum intel_platform platform, u8 rev)
+{
+ /*
+ * we use FIRMWARE_UNINITIALIZED to detect checks against uc_fw->status
+ * before we've looked at the HW caps to see if we have uC support
+ */
+ BUILD_BUG_ON(INTEL_UC_FIRMWARE_UNINITIALIZED);
+ GEM_BUG_ON(uc_fw->status);
+ GEM_BUG_ON(uc_fw->path);
+
+ uc_fw->type = type;
+
+ if (supported) {
+ __uc_fw_auto_select(uc_fw, platform, rev);
+ __uc_fw_user_override(uc_fw);
+ }
+
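+ /*
+ * path == NULL: no uC HW, no blob for this platform, or default-off;
+ * path == "": uC HW present but disabled via the enable_guc modparam.
+ */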
+ intel_uc_fw_change_status(uc_fw, uc_fw->path ? *uc_fw->path ?
+ INTEL_UC_FIRMWARE_SELECTED :
+ INTEL_UC_FIRMWARE_DISABLED :
+ INTEL_UC_FIRMWARE_NOT_SUPPORTED);
+}
+
+static void __force_fw_fetch_failures(struct intel_uc_fw *uc_fw,
+ struct drm_i915_private *i915,
+ int e)
+{
+ bool user = e == -EINVAL;
+
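+ /*
+ * Each i915_inject_load_error() call below consumes one step of the
+ * fault-injection counter, so at most one branch fires per fetch,
+ * selecting a different failure mode as the counter advances.
+ */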
+ if (i915_inject_load_error(i915, e)) {
+ /* non-existing blob */
+ uc_fw->path = "<invalid>";
+ uc_fw->user_overridden = user;
+ } else if (i915_inject_load_error(i915, e)) {
+ /* require next major version */
+ uc_fw->major_ver_wanted += 1;
+ uc_fw->minor_ver_wanted = 0;
+ uc_fw->user_overridden = user;
+ } else if (i915_inject_load_error(i915, e)) {
+ /* require next minor version */
+ uc_fw->minor_ver_wanted += 1;
+ uc_fw->user_overridden = user;
+ } else if (uc_fw->major_ver_wanted && i915_inject_load_error(i915, e)) {
+ /* require prev major version */
+ uc_fw->major_ver_wanted -= 1;
+ uc_fw->minor_ver_wanted = 0;
+ uc_fw->user_overridden = user;
+ } else if (uc_fw->minor_ver_wanted && i915_inject_load_error(i915, e)) {
+ /* require prev minor version - hey, this should work! */
+ uc_fw->minor_ver_wanted -= 1;
+ uc_fw->user_overridden = user;
+ } else if (user && i915_inject_load_error(i915, e)) {
+ /* officially unsupported platform */
+ uc_fw->major_ver_wanted = 0;
+ uc_fw->minor_ver_wanted = 0;
+ uc_fw->user_overridden = true;
+ }
+}
+
+/**
+ * intel_uc_fw_fetch - fetch uC firmware
+ * @uc_fw: uC firmware
+ * @i915: device private
+ *
+ * Fetch uC firmware into GEM obj.
+ *
+ * Return: 0 on success, a negative errno code on failure.
+ */
+int intel_uc_fw_fetch(struct intel_uc_fw *uc_fw, struct drm_i915_private *i915)
+{
+ struct device *dev = i915->drm.dev;
+ struct drm_i915_gem_object *obj;
+ const struct firmware *fw = NULL;
+ struct uc_css_header *css;
+ size_t size;
+ int err;
+
+ GEM_BUG_ON(!i915->wopcm.size);
+ GEM_BUG_ON(!intel_uc_fw_is_enabled(uc_fw));
+
+ err = i915_inject_load_error(i915, -ENXIO);
+ if (err)
+ return err;
+
+ __force_fw_fetch_failures(uc_fw, i915, -EINVAL);
+ __force_fw_fetch_failures(uc_fw, i915, -ESTALE);
+
+ err = request_firmware(&fw, uc_fw->path, dev);
+ if (err)
+ goto fail;
+
+ /* Check the size of the blob before examining buffer contents */
+ if (unlikely(fw->size < sizeof(struct uc_css_header))) {
+ dev_warn(dev, "%s firmware %s: invalid size: %zu < %zu\n",
+ intel_uc_fw_type_repr(uc_fw->type), uc_fw->path,
+ fw->size, sizeof(struct uc_css_header));
+ err = -ENODATA;
+ goto fail;
+ }
+
+ css = (struct uc_css_header *)fw->data;
+
+ /* Check integrity of size values inside CSS header */
+ size = (css->header_size_dw - css->key_size_dw - css->modulus_size_dw -
+ css->exponent_size_dw) * sizeof(u32);
+ if (unlikely(size != sizeof(struct uc_css_header))) {
+ dev_warn(dev,
+ "%s firmware %s: unexpected header size: %zu != %zu\n",
+ intel_uc_fw_type_repr(uc_fw->type), uc_fw->path,
+ size, sizeof(struct uc_css_header));
+ err = -EPROTO;
+ goto fail;
+ }
+
+ /* uCode size must be calculated from other sizes */
+ uc_fw->ucode_size = (css->size_dw - css->header_size_dw) * sizeof(u32);
+
+ /* now RSA */
+ if (unlikely(css->key_size_dw != UOS_RSA_SCRATCH_COUNT)) {
+ dev_warn(dev, "%s firmware %s: unexpected key size: %u != %u\n",
+ intel_uc_fw_type_repr(uc_fw->type), uc_fw->path,
+ css->key_size_dw, UOS_RSA_SCRATCH_COUNT);
+ err = -EPROTO;
+ goto fail;
+ }
+ uc_fw->rsa_size = css->key_size_dw * sizeof(u32);
+
+ /* At a minimum, the blob must contain header, uCode and RSA key; check their combined size */
+ size = sizeof(struct uc_css_header) + uc_fw->ucode_size + uc_fw->rsa_size;
+ if (unlikely(fw->size < size)) {
+ dev_warn(dev, "%s firmware %s: invalid size: %zu < %zu\n",
+ intel_uc_fw_type_repr(uc_fw->type), uc_fw->path,
+ fw->size, size);
+ err = -ENOEXEC;
+ goto fail;
+ }
+
+ /* Sanity check that the fw is not larger than the whole WOPCM memory */
+ size = __intel_uc_fw_get_upload_size(uc_fw);
+ if (unlikely(size >= i915->wopcm.size)) {
+ dev_warn(dev, "%s firmware %s: invalid size: %zu > %zu\n",
+ intel_uc_fw_type_repr(uc_fw->type), uc_fw->path,
+ size, (size_t)i915->wopcm.size);
+ err = -E2BIG;
+ goto fail;
+ }
+
+ /* Get version numbers from the CSS header */
+ switch (uc_fw->type) {
+ case INTEL_UC_FW_TYPE_GUC:
+ uc_fw->major_ver_found = FIELD_GET(CSS_SW_VERSION_GUC_MAJOR,
+ css->sw_version);
+ uc_fw->minor_ver_found = FIELD_GET(CSS_SW_VERSION_GUC_MINOR,
+ css->sw_version);
+ break;
+
+ case INTEL_UC_FW_TYPE_HUC:
+ uc_fw->major_ver_found = FIELD_GET(CSS_SW_VERSION_HUC_MAJOR,
+ css->sw_version);
+ uc_fw->minor_ver_found = FIELD_GET(CSS_SW_VERSION_HUC_MINOR,
+ css->sw_version);
+ break;
+
+ default:
+ MISSING_CASE(uc_fw->type);
+ break;
+ }
+
+ if (uc_fw->major_ver_found != uc_fw->major_ver_wanted ||
+ uc_fw->minor_ver_found < uc_fw->minor_ver_wanted) {
+ dev_notice(dev, "%s firmware %s: unexpected version: %u.%u != %u.%u\n",
+ intel_uc_fw_type_repr(uc_fw->type), uc_fw->path,
+ uc_fw->major_ver_found, uc_fw->minor_ver_found,
+ uc_fw->major_ver_wanted, uc_fw->minor_ver_wanted);
+ if (!intel_uc_fw_is_overridden(uc_fw)) {
+ err = -ENOEXEC;
+ goto fail;
+ }
+ }
+
+ obj = i915_gem_object_create_shmem_from_data(i915, fw->data, fw->size);
+ if (IS_ERR(obj)) {
+ err = PTR_ERR(obj);
+ goto fail;
+ }
+
+ uc_fw->obj = obj;
+ uc_fw->size = fw->size;
+ intel_uc_fw_change_status(uc_fw, INTEL_UC_FIRMWARE_AVAILABLE);
+
+ release_firmware(fw);
+ return 0;
+
+fail:
+ intel_uc_fw_change_status(uc_fw, err == -ENOENT ?
+ INTEL_UC_FIRMWARE_MISSING :
+ INTEL_UC_FIRMWARE_ERROR);
+
+ dev_notice(dev, "%s firmware %s: fetch failed with error %d\n",
+ intel_uc_fw_type_repr(uc_fw->type), uc_fw->path, err);
+ dev_info(dev, "%s firmware(s) can be downloaded from %s\n",
+ intel_uc_fw_type_repr(uc_fw->type), INTEL_UC_FIRMWARE_URL);
+
+ release_firmware(fw); /* OK even if fw is NULL */
+ return err;
+}
+
+static u32 uc_fw_ggtt_offset(struct intel_uc_fw *uc_fw, struct i915_ggtt *ggtt)
+{
+ struct drm_mm_node *node = &ggtt->uc_fw;
+
+ GEM_BUG_ON(!node->allocated);
+ GEM_BUG_ON(upper_32_bits(node->start));
+ GEM_BUG_ON(upper_32_bits(node->start + node->size - 1));
+
+ return lower_32_bits(node->start);
+}
+
+static void intel_uc_fw_ggtt_bind(struct intel_uc_fw *uc_fw,
+ struct intel_gt *gt)
+{
+ struct drm_i915_gem_object *obj = uc_fw->obj;
+ struct i915_ggtt *ggtt = gt->ggtt;
+ struct i915_vma dummy = {
+ .node.start = uc_fw_ggtt_offset(uc_fw, ggtt),
+ .node.size = obj->base.size,
+ .pages = obj->mm.pages,
+ .vm = &ggtt->vm,
+ };
+
+ GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
+ GEM_BUG_ON(dummy.node.size > ggtt->uc_fw.size);
+
+ /* uc_fw->obj cache domains were not controlled across suspend */
+ drm_clflush_sg(dummy.pages);
+
+ ggtt->vm.insert_entries(&ggtt->vm, &dummy, I915_CACHE_NONE, 0);
+}
+
+static void intel_uc_fw_ggtt_unbind(struct intel_uc_fw *uc_fw,
+ struct intel_gt *gt)
+{
+ struct drm_i915_gem_object *obj = uc_fw->obj;
+ struct i915_ggtt *ggtt = gt->ggtt;
+ u64 start = uc_fw_ggtt_offset(uc_fw, ggtt);
+
+ ggtt->vm.clear_range(&ggtt->vm, start, obj->base.size);
+}
+
+static int uc_fw_xfer(struct intel_uc_fw *uc_fw, struct intel_gt *gt,
+ u32 wopcm_offset, u32 dma_flags)
+{
+ struct intel_uncore *uncore = gt->uncore;
+ u64 offset;
+ int ret;
+
+ ret = i915_inject_load_error(gt->i915, -ETIMEDOUT);
+ if (ret)
+ return ret;
+
+ intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
+
+ /* Set the source address for the uCode */
+ offset = uc_fw_ggtt_offset(uc_fw, gt->ggtt);
+ GEM_BUG_ON(upper_32_bits(offset) & 0xFFFF0000);
+ intel_uncore_write_fw(uncore, DMA_ADDR_0_LOW, lower_32_bits(offset));
+ intel_uncore_write_fw(uncore, DMA_ADDR_0_HIGH, upper_32_bits(offset));
+
+ /* Set the DMA destination */
+ intel_uncore_write_fw(uncore, DMA_ADDR_1_LOW, wopcm_offset);
+ intel_uncore_write_fw(uncore, DMA_ADDR_1_HIGH, DMA_ADDRESS_SPACE_WOPCM);
+
+ /*
+ * Set the transfer size. The header plus uCode will be copied to WOPCM
+ * via DMA, excluding any other components
+ */
+ intel_uncore_write_fw(uncore, DMA_COPY_SIZE,
+ sizeof(struct uc_css_header) + uc_fw->ucode_size);
+
+ /* Start the DMA */
+ intel_uncore_write_fw(uncore, DMA_CTRL,
+ _MASKED_BIT_ENABLE(dma_flags | START_DMA));
+
+ /* Wait for DMA to finish */
+ ret = intel_wait_for_register_fw(uncore, DMA_CTRL, START_DMA, 0, 100);
+ if (ret)
+ dev_err(gt->i915->drm.dev, "DMA for %s fw failed, DMA_CTRL=%u\n",
+ intel_uc_fw_type_repr(uc_fw->type),
+ intel_uncore_read_fw(uncore, DMA_CTRL));
+
+ /* Disable the bits once DMA is over */
+ intel_uncore_write_fw(uncore, DMA_CTRL, _MASKED_BIT_DISABLE(dma_flags));
+
+ intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
+
+ return ret;
+}
+
+/**
+ * intel_uc_fw_upload - load uC firmware using custom loader
+ * @uc_fw: uC firmware
+ * @gt: the intel_gt structure
+ * @wopcm_offset: destination offset in wopcm
+ * @dma_flags: flags for DMA_CTRL
+ *
+ * Loads uC firmware and updates internal flags.
+ *
+ * Return: 0 on success, non-zero on failure.
+ */
+int intel_uc_fw_upload(struct intel_uc_fw *uc_fw, struct intel_gt *gt,
+ u32 wopcm_offset, u32 dma_flags)
+{
+ int err;
+
+ /* make sure the status was cleared the last time we reset the uc */
+ GEM_BUG_ON(intel_uc_fw_is_loaded(uc_fw));
+
+ err = i915_inject_load_error(gt->i915, -ENOEXEC);
+ if (err)
+ return err;
+
+ if (!intel_uc_fw_is_available(uc_fw))
+ return -ENOEXEC;
+
+ /* Call custom loader */
+ intel_uc_fw_ggtt_bind(uc_fw, gt);
+ err = uc_fw_xfer(uc_fw, gt, wopcm_offset, dma_flags);
+ intel_uc_fw_ggtt_unbind(uc_fw, gt);
+ if (err)
+ goto fail;
+
+ intel_uc_fw_change_status(uc_fw, INTEL_UC_FIRMWARE_TRANSFERRED);
+ return 0;
+
+fail:
+ i915_probe_error(gt->i915, "Failed to load %s firmware %s (%d)\n",
+ intel_uc_fw_type_repr(uc_fw->type), uc_fw->path,
+ err);
+ intel_uc_fw_change_status(uc_fw, INTEL_UC_FIRMWARE_FAIL);
+ return err;
+}
+
+int intel_uc_fw_init(struct intel_uc_fw *uc_fw)
+{
+ int err;
+
+ /* this should happen before the load! */
+ GEM_BUG_ON(intel_uc_fw_is_loaded(uc_fw));
+
+ if (!intel_uc_fw_is_available(uc_fw))
+ return -ENOEXEC;
+
+ err = i915_gem_object_pin_pages(uc_fw->obj);
+ if (err) {
+ DRM_DEBUG_DRIVER("%s fw pin-pages err=%d\n",
+ intel_uc_fw_type_repr(uc_fw->type), err);
+ intel_uc_fw_change_status(uc_fw, INTEL_UC_FIRMWARE_FAIL);
+ }
+
+ return err;
+}
+
+void intel_uc_fw_fini(struct intel_uc_fw *uc_fw)
+{
+ if (!intel_uc_fw_is_available(uc_fw))
+ return;
+
+ i915_gem_object_unpin_pages(uc_fw->obj);
+}
+
+/**
+ * intel_uc_fw_cleanup_fetch - cleanup uC firmware
+ * @uc_fw: uC firmware
+ *
+ * Cleans up uC firmware by releasing the firmware GEM obj.
+ */
+void intel_uc_fw_cleanup_fetch(struct intel_uc_fw *uc_fw)
+{
+ if (!intel_uc_fw_is_available(uc_fw))
+ return;
+
+ i915_gem_object_put(fetch_and_zero(&uc_fw->obj));
+
+ intel_uc_fw_change_status(uc_fw, INTEL_UC_FIRMWARE_SELECTED);
+}
+
+/**
+ * intel_uc_fw_copy_rsa - copy fw RSA to buffer
+ *
+ * @uc_fw: uC firmware
+ * @dst: dst buffer
+ * @max_len: max number of bytes to copy
+ *
+ * Return: number of copied bytes.
+ */
+size_t intel_uc_fw_copy_rsa(struct intel_uc_fw *uc_fw, void *dst, u32 max_len)
+{
+ struct sg_table *pages = uc_fw->obj->mm.pages;
+ u32 size = min_t(u32, uc_fw->rsa_size, max_len);
+ u32 offset = sizeof(struct uc_css_header) + uc_fw->ucode_size;
+
+ GEM_BUG_ON(!intel_uc_fw_is_available(uc_fw));
+
+ return sg_pcopy_to_buffer(pages->sgl, pages->nents, dst, size, offset);
+}
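+
+/*
+ * Usage sketch (an assumed caller, mirroring the HuC RSA staging in
+ * intel_huc.c): the key is copied into a pinned vma whose GGTT offset is
+ * later handed to GuC in the authentication request.
+ *
+ *	vaddr = i915_gem_object_pin_map(vma->obj, I915_MAP_WB);
+ *	copied = intel_uc_fw_copy_rsa(&huc->fw, vaddr, vma->size);
+ *	i915_gem_object_unpin_map(vma->obj);
+ *	GEM_BUG_ON(copied < huc->fw.rsa_size);
+ */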
+
+/**
+ * intel_uc_fw_dump - dump information about uC firmware
+ * @uc_fw: uC firmware
+ * @p: the &drm_printer
+ *
+ * Pretty printer for uC firmware.
+ */
+void intel_uc_fw_dump(const struct intel_uc_fw *uc_fw, struct drm_printer *p)
+{
+ drm_printf(p, "%s firmware: %s\n",
+ intel_uc_fw_type_repr(uc_fw->type), uc_fw->path);
+ drm_printf(p, "\tstatus: %s\n",
+ intel_uc_fw_status_repr(uc_fw->status));
+ drm_printf(p, "\tversion: wanted %u.%u, found %u.%u\n",
+ uc_fw->major_ver_wanted, uc_fw->minor_ver_wanted,
+ uc_fw->major_ver_found, uc_fw->minor_ver_found);
+ drm_printf(p, "\tuCode: %u bytes\n", uc_fw->ucode_size);
+ drm_printf(p, "\tRSA: %u bytes\n", uc_fw->rsa_size);
+}
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h
new file mode 100644
index 000000000000..7a0a5989afc9
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h
@@ -0,0 +1,241 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2014-2019 Intel Corporation
+ */
+
+#ifndef _INTEL_UC_FW_H_
+#define _INTEL_UC_FW_H_
+
+#include <linux/types.h>
+#include "intel_uc_fw_abi.h"
+#include "intel_device_info.h"
+#include "i915_gem.h"
+
+struct drm_printer;
+struct drm_i915_private;
+struct intel_gt;
+
+/* Home of GuC, HuC and DMC firmwares */
+#define INTEL_UC_FIRMWARE_URL "https://git.kernel.org/pub/scm/linux/kernel/git/firmware/linux-firmware.git/tree/i915"
+
+/*
+ * +------------+---------------------------------------------------+
+ * | PHASE | FIRMWARE STATUS TRANSITIONS |
+ * +============+===================================================+
+ * | | UNINITIALIZED |
+ * +------------+- / | \ -+
+ * | | DISABLED <--/ | \--> NOT_SUPPORTED |
+ * | init_early | V |
+ * | | SELECTED |
+ * +------------+- / | \ -+
+ * | | MISSING <--/ | \--> ERROR |
+ * | fetch | | |
+ * | | /------> AVAILABLE <---<-----------\ |
+ * +------------+- \ / \ \ \ -+
+ * | | FAIL <--< \--> TRANSFERRED \ |
+ * | upload | \ / \ / |
+ * | | \---------/ \--> RUNNING |
+ * +------------+---------------------------------------------------+
+ */
+
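+/*
+ * A successful load thus walks UNINITIALIZED -> SELECTED (init_early) ->
+ * AVAILABLE (fetch) -> TRANSFERRED (upload) -> RUNNING (init/auth), while
+ * intel_uc_fw_sanitize() drops a loaded fw back to AVAILABLE so it can be
+ * re-uploaded after a reset.
+ */
+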
+enum intel_uc_fw_status {
+ INTEL_UC_FIRMWARE_NOT_SUPPORTED = -1, /* no uc HW */
+ INTEL_UC_FIRMWARE_UNINITIALIZED = 0, /* used to catch checks done too early */
+ INTEL_UC_FIRMWARE_DISABLED, /* disabled */
+ INTEL_UC_FIRMWARE_SELECTED, /* selected the blob we want to load */
+ INTEL_UC_FIRMWARE_MISSING, /* blob not found on the system */
+ INTEL_UC_FIRMWARE_ERROR, /* invalid format or version */
+ INTEL_UC_FIRMWARE_AVAILABLE, /* blob found and copied in mem */
+ INTEL_UC_FIRMWARE_FAIL, /* failed to xfer or init/auth the fw */
+ INTEL_UC_FIRMWARE_TRANSFERRED, /* dma xfer done */
+ INTEL_UC_FIRMWARE_RUNNING /* init/auth done */
+};
+
+enum intel_uc_fw_type {
+ INTEL_UC_FW_TYPE_GUC = 0,
+ INTEL_UC_FW_TYPE_HUC
+};
+#define INTEL_UC_FW_NUM_TYPES 2
+
+/*
+ * This structure encapsulates all the data needed during the process
+ * of fetching, caching, and loading the firmware image into the uC.
+ */
+struct intel_uc_fw {
+ enum intel_uc_fw_type type;
+ union {
+ const enum intel_uc_fw_status status;
+ enum intel_uc_fw_status __status; /* no accidental overwrites */
+ };
+ const char *path;
+ bool user_overridden;
+ size_t size;
+ struct drm_i915_gem_object *obj;
+
+ /*
+ * The firmware build process will generate a version header file with major and
+ * minor version defined. The versions are built into the CSS header of the
+ * firmware. The i915 kernel driver sets the minimal firmware version
+ * required per platform.
+ */
+ u16 major_ver_wanted;
+ u16 minor_ver_wanted;
+ u16 major_ver_found;
+ u16 minor_ver_found;
+
+ u32 rsa_size;
+ u32 ucode_size;
+};
+
+#ifdef CONFIG_DRM_I915_DEBUG_GUC
+void intel_uc_fw_change_status(struct intel_uc_fw *uc_fw,
+ enum intel_uc_fw_status status);
+#else
+static inline void intel_uc_fw_change_status(struct intel_uc_fw *uc_fw,
+ enum intel_uc_fw_status status)
+{
+ uc_fw->__status = status;
+}
+#endif
+
+static inline
+const char *intel_uc_fw_status_repr(enum intel_uc_fw_status status)
+{
+ switch (status) {
+ case INTEL_UC_FIRMWARE_NOT_SUPPORTED:
+ return "N/A";
+ case INTEL_UC_FIRMWARE_UNINITIALIZED:
+ return "UNINITIALIZED";
+ case INTEL_UC_FIRMWARE_DISABLED:
+ return "DISABLED";
+ case INTEL_UC_FIRMWARE_SELECTED:
+ return "SELECTED";
+ case INTEL_UC_FIRMWARE_MISSING:
+ return "MISSING";
+ case INTEL_UC_FIRMWARE_ERROR:
+ return "ERROR";
+ case INTEL_UC_FIRMWARE_AVAILABLE:
+ return "AVAILABLE";
+ case INTEL_UC_FIRMWARE_FAIL:
+ return "FAIL";
+ case INTEL_UC_FIRMWARE_TRANSFERRED:
+ return "TRANSFERRED";
+ case INTEL_UC_FIRMWARE_RUNNING:
+ return "RUNNING";
+ }
+ return "<invalid>";
+}
+
+static inline int intel_uc_fw_status_to_error(enum intel_uc_fw_status status)
+{
+ switch (status) {
+ case INTEL_UC_FIRMWARE_NOT_SUPPORTED:
+ return -ENODEV;
+ case INTEL_UC_FIRMWARE_UNINITIALIZED:
+ return -EACCES;
+ case INTEL_UC_FIRMWARE_DISABLED:
+ return -EPERM;
+ case INTEL_UC_FIRMWARE_MISSING:
+ return -ENOENT;
+ case INTEL_UC_FIRMWARE_ERROR:
+ return -ENOEXEC;
+ case INTEL_UC_FIRMWARE_FAIL:
+ return -EIO;
+ case INTEL_UC_FIRMWARE_SELECTED:
+ return -ESTALE;
+ case INTEL_UC_FIRMWARE_AVAILABLE:
+ case INTEL_UC_FIRMWARE_TRANSFERRED:
+ case INTEL_UC_FIRMWARE_RUNNING:
+ return 0;
+ }
+ return -EINVAL;
+}
+
+static inline const char *intel_uc_fw_type_repr(enum intel_uc_fw_type type)
+{
+ switch (type) {
+ case INTEL_UC_FW_TYPE_GUC:
+ return "GuC";
+ case INTEL_UC_FW_TYPE_HUC:
+ return "HuC";
+ }
+ return "uC";
+}
+
+static inline enum intel_uc_fw_status
+__intel_uc_fw_status(struct intel_uc_fw *uc_fw)
+{
+ /* shouldn't call this before checking hw/blob availability */
+ GEM_BUG_ON(uc_fw->status == INTEL_UC_FIRMWARE_UNINITIALIZED);
+ return uc_fw->status;
+}
+
+static inline bool intel_uc_fw_is_supported(struct intel_uc_fw *uc_fw)
+{
+ return __intel_uc_fw_status(uc_fw) != INTEL_UC_FIRMWARE_NOT_SUPPORTED;
+}
+
+static inline bool intel_uc_fw_is_enabled(struct intel_uc_fw *uc_fw)
+{
+ return __intel_uc_fw_status(uc_fw) > INTEL_UC_FIRMWARE_DISABLED;
+}
+
+static inline bool intel_uc_fw_is_available(struct intel_uc_fw *uc_fw)
+{
+ return __intel_uc_fw_status(uc_fw) >= INTEL_UC_FIRMWARE_AVAILABLE;
+}
+
+static inline bool intel_uc_fw_is_loaded(struct intel_uc_fw *uc_fw)
+{
+ return __intel_uc_fw_status(uc_fw) >= INTEL_UC_FIRMWARE_TRANSFERRED;
+}
+
+static inline bool intel_uc_fw_is_running(struct intel_uc_fw *uc_fw)
+{
+ return __intel_uc_fw_status(uc_fw) == INTEL_UC_FIRMWARE_RUNNING;
+}
+
+static inline bool intel_uc_fw_is_overridden(const struct intel_uc_fw *uc_fw)
+{
+ return uc_fw->user_overridden;
+}
+
+static inline void intel_uc_fw_sanitize(struct intel_uc_fw *uc_fw)
+{
+ if (intel_uc_fw_is_loaded(uc_fw))
+ intel_uc_fw_change_status(uc_fw, INTEL_UC_FIRMWARE_AVAILABLE);
+}
+
+static inline u32 __intel_uc_fw_get_upload_size(struct intel_uc_fw *uc_fw)
+{
+ return sizeof(struct uc_css_header) + uc_fw->ucode_size;
+}
+
+/**
+ * intel_uc_fw_get_upload_size() - Get the size of the firmware to be uploaded.
+ * @uc_fw: uC firmware.
+ *
+ * Get the size of the firmware and header that will be uploaded to WOPCM.
+ *
+ * Return: Upload firmware size, or zero on firmware fetch failure.
+ */
+static inline u32 intel_uc_fw_get_upload_size(struct intel_uc_fw *uc_fw)
+{
+ if (!intel_uc_fw_is_available(uc_fw))
+ return 0;
+
+ return __intel_uc_fw_get_upload_size(uc_fw);
+}
+
+void intel_uc_fw_init_early(struct intel_uc_fw *uc_fw,
+ enum intel_uc_fw_type type, bool supported,
+ enum intel_platform platform, u8 rev);
+int intel_uc_fw_fetch(struct intel_uc_fw *uc_fw, struct drm_i915_private *i915);
+void intel_uc_fw_cleanup_fetch(struct intel_uc_fw *uc_fw);
+int intel_uc_fw_upload(struct intel_uc_fw *uc_fw, struct intel_gt *gt,
+ u32 wopcm_offset, u32 dma_flags);
+int intel_uc_fw_init(struct intel_uc_fw *uc_fw);
+void intel_uc_fw_fini(struct intel_uc_fw *uc_fw);
+size_t intel_uc_fw_copy_rsa(struct intel_uc_fw *uc_fw, void *dst, u32 max_len);
+void intel_uc_fw_dump(const struct intel_uc_fw *uc_fw, struct drm_printer *p);
+
+#endif
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw_abi.h b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw_abi.h
new file mode 100644
index 000000000000..ae58e8a8c53b
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw_abi.h
@@ -0,0 +1,82 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef _INTEL_UC_FW_ABI_H
+#define _INTEL_UC_FW_ABI_H
+
+#include <linux/types.h>
+#include <linux/build_bug.h>
+
+/**
+ * DOC: Firmware Layout
+ *
+ * The GuC/HuC firmware layout looks like this::
+ *
+ * +======================================================================+
+ * | Firmware blob |
+ * +===============+===============+============+============+============+
+ * | CSS header | uCode | RSA key | modulus | exponent |
+ * +===============+===============+============+============+============+
+ * <-header size-> <---header size continued ----------->
+ * <--- size ----------------------------------------------------------->
+ * <-key size->
+ * <-mod size->
+ * <-exp size->
+ *
+ * The firmware may or may not have modulus key and exponent data. The header,
+ * uCode and RSA signature are must-have components that will be used by the
+ * driver. The length of each component, in dwords, can be found in the header.
+ * In the case that modulus and exponent are not present in the fw, a.k.a. a
+ * truncated image, their length values still appear in the header.
+ *
+ * The driver will do some basic fw size validation based on the following rules:
+ *
+ * 1. Header, uCode and RSA are must-have components.
+ * 2. All firmware components, if present, are in the sequence illustrated in
+ *    the layout table above.
+ * 3. Length info of each component can be found in the header, in dwords.
+ * 4. Modulus and exponent key are not required by the driver and may be absent
+ *    from the fw; in that case the driver will load a truncated firmware image.
+ *
+ * The only difference between GuC and HuC firmwares is how the version
+ * information is saved.
+ */
+
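+/*
+ * Size derivation sketch, mirroring the checks in intel_uc_fw_fetch(): all
+ * CSS length fields are in dwords, so, in bytes:
+ *
+ *	css_size   = (header_size_dw - key_size_dw - modulus_size_dw -
+ *		      exponent_size_dw) * sizeof(u32);
+ *	             (must equal the size of the struct below)
+ *	ucode_size = (size_dw - header_size_dw) * sizeof(u32);
+ *	rsa_size   = key_size_dw * sizeof(u32);
+ */
+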
+struct uc_css_header {
+ u32 module_type;
+ /*
+ * header_size includes all non-uCode bits, including css_header, rsa
+ * key, modulus key and exponent data.
+ */
+ u32 header_size_dw;
+ u32 header_version;
+ u32 module_id;
+ u32 module_vendor;
+ u32 date;
+#define CSS_DATE_DAY (0xFF << 0)
+#define CSS_DATE_MONTH (0xFF << 8)
+#define CSS_DATE_YEAR (0xFFFF << 16)
+ u32 size_dw; /* uCode plus header_size_dw */
+ u32 key_size_dw;
+ u32 modulus_size_dw;
+ u32 exponent_size_dw;
+ u32 time;
+#define CSS_TIME_HOUR (0xFF << 0)
+#define CSS_DATE_MIN (0xFF << 8)
+#define CSS_DATE_SEC (0xFFFF << 16)
+ char username[8];
+ char buildnumber[12];
+ u32 sw_version;
+#define CSS_SW_VERSION_GUC_MAJOR (0xFF << 16)
+#define CSS_SW_VERSION_GUC_MINOR (0xFF << 8)
+#define CSS_SW_VERSION_GUC_PATCH (0xFF << 0)
+#define CSS_SW_VERSION_HUC_MAJOR (0xFFFF << 16)
+#define CSS_SW_VERSION_HUC_MINOR (0xFFFF << 0)
+ u32 reserved[14];
+ u32 header_info;
+} __packed;
+static_assert(sizeof(struct uc_css_header) == 128);
+
+#endif /* _INTEL_UC_FW_ABI_H */
diff --git a/drivers/gpu/drm/i915/selftests/intel_guc.c b/drivers/gpu/drm/i915/gt/uc/selftest_guc.c
index 6ca8584cd64c..bba0eafe1cdb 100644
--- a/drivers/gpu/drm/i915/selftests/intel_guc.c
+++ b/drivers/gpu/drm/i915/gt/uc/selftest_guc.c
@@ -1,25 +1,6 @@
+// SPDX-License-Identifier: MIT
/*
* Copyright © 2017 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
*/
#include "i915_selftest.h"
@@ -103,17 +84,9 @@ static int ring_doorbell_nop(struct intel_guc_client *client)
/*
* Basic client sanity check, handy to validate create_clients.
*/
-static int validate_client(struct intel_guc_client *client,
- int client_priority,
- bool is_preempt_client)
+static int validate_client(struct intel_guc_client *client, int client_priority)
{
- struct drm_i915_private *dev_priv = guc_to_i915(client->guc);
- struct i915_gem_context *ctx_owner = is_preempt_client ?
- dev_priv->preempt_context : dev_priv->kernel_context;
-
- if (client->owner != ctx_owner ||
- client->engines != INTEL_INFO(dev_priv)->engine_mask ||
- client->priority != client_priority ||
+ if (client->priority != client_priority ||
client->doorbell_id == GUC_DOORBELL_INVALID)
return -EINVAL;
else
@@ -142,11 +115,11 @@ static int igt_guc_clients(void *args)
struct intel_guc *guc;
int err = 0;
- GEM_BUG_ON(!HAS_GUC(dev_priv));
+ GEM_BUG_ON(!HAS_GT_UC(dev_priv));
mutex_lock(&dev_priv->drm.struct_mutex);
wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
- guc = &dev_priv->guc;
+ guc = &dev_priv->gt.uc.guc;
if (!guc) {
pr_err("No guc object!\n");
err = -EINVAL;
@@ -163,7 +136,7 @@ static int igt_guc_clients(void *args)
*/
guc_clients_disable(guc);
guc_clients_destroy(guc);
- if (guc->execbuf_client || guc->preempt_client) {
+ if (guc->execbuf_client) {
pr_err("guc_clients_destroy lied!\n");
err = -EINVAL;
goto unlock;
@@ -177,24 +150,14 @@ static int igt_guc_clients(void *args)
GEM_BUG_ON(!guc->execbuf_client);
err = validate_client(guc->execbuf_client,
- GUC_CLIENT_PRIORITY_KMD_NORMAL, false);
+ GUC_CLIENT_PRIORITY_KMD_NORMAL);
if (err) {
pr_err("execbug client validation failed\n");
goto out;
}
- if (guc->preempt_client) {
- err = validate_client(guc->preempt_client,
- GUC_CLIENT_PRIORITY_KMD_HIGH, true);
- if (err) {
- pr_err("preempt client validation failed\n");
- goto out;
- }
- }
-
- /* each client should now have reserved a doorbell */
- if (!has_doorbell(guc->execbuf_client) ||
- (guc->preempt_client && !has_doorbell(guc->preempt_client))) {
+ /* the client should now have reserved a doorbell */
+ if (!has_doorbell(guc->execbuf_client)) {
pr_err("guc_clients_create didn't reserve doorbells\n");
err = -EINVAL;
goto out;
@@ -204,8 +167,7 @@ static int igt_guc_clients(void *args)
guc_clients_enable(guc);
/* each client should now have received a doorbell */
- if (!client_doorbell_in_sync(guc->execbuf_client) ||
- !client_doorbell_in_sync(guc->preempt_client)) {
+ if (!client_doorbell_in_sync(guc->execbuf_client)) {
pr_err("failed to initialize the doorbells\n");
err = -EINVAL;
goto out;
@@ -245,11 +207,11 @@ static int igt_guc_doorbells(void *arg)
int i, err = 0;
u16 db_id;
- GEM_BUG_ON(!HAS_GUC(dev_priv));
+ GEM_BUG_ON(!HAS_GT_UC(dev_priv));
mutex_lock(&dev_priv->drm.struct_mutex);
wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
- guc = &dev_priv->guc;
+ guc = &dev_priv->gt.uc.guc;
if (!guc) {
pr_err("No guc object!\n");
err = -EINVAL;
@@ -261,10 +223,7 @@ static int igt_guc_doorbells(void *arg)
goto unlock;
for (i = 0; i < ATTEMPTS; i++) {
- clients[i] = guc_client_alloc(dev_priv,
- INTEL_INFO(dev_priv)->engine_mask,
- i % GUC_CLIENT_PRIORITY_NUM,
- dev_priv->kernel_context);
+ clients[i] = guc_client_alloc(guc, i % GUC_CLIENT_PRIORITY_NUM);
if (!clients[i]) {
pr_err("[%d] No guc client\n", i);
@@ -300,8 +259,7 @@ static int igt_guc_doorbells(void *arg)
goto out;
}
- err = validate_client(clients[i],
- i % GUC_CLIENT_PRIORITY_NUM, false);
+ err = validate_client(clients[i], i % GUC_CLIENT_PRIORITY_NUM);
if (err) {
pr_err("[%d] client_alloc sanity check failed!\n", i);
err = -EINVAL;
diff --git a/drivers/gpu/drm/i915/gvt/aperture_gm.c b/drivers/gpu/drm/i915/gvt/aperture_gm.c
index c3d19d88da40..5ff2437b2998 100644
--- a/drivers/gpu/drm/i915/gvt/aperture_gm.c
+++ b/drivers/gpu/drm/i915/gvt/aperture_gm.c
@@ -172,14 +172,14 @@ static void free_vgpu_fence(struct intel_vgpu *vgpu)
intel_runtime_pm_get(&dev_priv->runtime_pm);
- mutex_lock(&dev_priv->drm.struct_mutex);
+ mutex_lock(&dev_priv->ggtt.vm.mutex);
_clear_vgpu_fence(vgpu);
for (i = 0; i < vgpu_fence_sz(vgpu); i++) {
reg = vgpu->fence.regs[i];
i915_unreserve_fence(reg);
vgpu->fence.regs[i] = NULL;
}
- mutex_unlock(&dev_priv->drm.struct_mutex);
+ mutex_unlock(&dev_priv->ggtt.vm.mutex);
intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm);
}
@@ -195,7 +195,7 @@ static int alloc_vgpu_fence(struct intel_vgpu *vgpu)
intel_runtime_pm_get(rpm);
/* Request fences from host */
- mutex_lock(&dev_priv->drm.struct_mutex);
+ mutex_lock(&dev_priv->ggtt.vm.mutex);
for (i = 0; i < vgpu_fence_sz(vgpu); i++) {
reg = i915_reserve_fence(dev_priv);
@@ -207,7 +207,7 @@ static int alloc_vgpu_fence(struct intel_vgpu *vgpu)
_clear_vgpu_fence(vgpu);
- mutex_unlock(&dev_priv->drm.struct_mutex);
+ mutex_unlock(&dev_priv->ggtt.vm.mutex);
intel_runtime_pm_put_unchecked(rpm);
return 0;
out_free_fence:
@@ -220,7 +220,7 @@ out_free_fence:
i915_unreserve_fence(reg);
vgpu->fence.regs[i] = NULL;
}
- mutex_unlock(&dev_priv->drm.struct_mutex);
+ mutex_unlock(&dev_priv->ggtt.vm.mutex);
intel_runtime_pm_put_unchecked(rpm);
return -ENOSPC;
}
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index b09dc315e2da..e753b1e706e2 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -374,21 +374,37 @@ typedef int (*parser_cmd_handler)(struct parser_exec_state *s);
#define ADDR_FIX_4(x1, x2, x3, x4) (ADDR_FIX_1(x1) | ADDR_FIX_3(x2, x3, x4))
#define ADDR_FIX_5(x1, x2, x3, x4, x5) (ADDR_FIX_1(x1) | ADDR_FIX_4(x2, x3, x4, x5))
+#define DWORD_FIELD(dword, end, start) \
+ FIELD_GET(GENMASK(end, start), cmd_val(s, dword))
+
+#define OP_LENGTH_BIAS 2
+#define CMD_LEN(value) (value + OP_LENGTH_BIAS)
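+
+/*
+ * e.g. CMD_LEN(1) == 3: the hardware length field encodes the total DWord
+ * count minus a bias of 2, so a command whose length field is 1 occupies
+ * 3 DWords.
+ */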
+
+static int gvt_check_valid_cmd_length(int len, int valid_len)
+{
+ if (valid_len != len) {
+ gvt_err("len is not valid: len=%u valid_len=%u\n",
+ len, valid_len);
+ return -EFAULT;
+ }
+ return 0;
+}
+
struct cmd_info {
const char *name;
u32 opcode;
-#define F_LEN_MASK (1U<<0)
+#define F_LEN_MASK 3U
#define F_LEN_CONST 1U
#define F_LEN_VAR 0U
+/* value is const although LEN may be variable */
+#define F_LEN_VAR_FIXED (1<<1)
/*
* command has its own ip advance logic
* e.g. MI_BATCH_START, MI_BATCH_END
*/
-#define F_IP_ADVANCE_CUSTOM (1<<1)
-
-#define F_POST_HANDLE (1<<2)
+#define F_IP_ADVANCE_CUSTOM (1<<2)
u32 flag;
#define R_RCS BIT(RCS0)
@@ -418,9 +434,12 @@ struct cmd_info {
* flag == F_LEN_VAR : length bias bits
* Note: length is in DWord
*/
- u8 len;
+ u32 len;
parser_cmd_handler handler;
+
+ /* valid length in DWord */
+ u32 valid_len;
};
struct cmd_entry {
@@ -944,6 +963,18 @@ static int cmd_handler_lri(struct parser_exec_state *s)
int i, ret = 0;
int cmd_len = cmd_length(s);
struct intel_gvt *gvt = s->vgpu->gvt;
+ u32 valid_len = CMD_LEN(1);
+
+ /*
+ * Official Intel docs are somewhat sloppy; check the definition of
+ * MI_LOAD_REGISTER_IMM.
+ */
+ #define MAX_VALID_LEN 127
+ if ((cmd_len < valid_len) || (cmd_len > MAX_VALID_LEN)) {
+ gvt_err("len is not valid: len=%u valid_len=%u\n",
+ cmd_len, valid_len);
+ return -EFAULT;
+ }
for (i = 1; i < cmd_len; i += 2) {
if (IS_BROADWELL(gvt->dev_priv) && s->ring_id != RCS0) {
@@ -1375,6 +1406,15 @@ static int cmd_handler_mi_display_flip(struct parser_exec_state *s)
int ret;
int i;
int len = cmd_length(s);
+ u32 valid_len = CMD_LEN(1);
+
+ /* Flip Type == Stereo 3D Flip */
+ if (DWORD_FIELD(2, 1, 0) == 2)
+ valid_len++;
+ ret = gvt_check_valid_cmd_length(cmd_length(s),
+ valid_len);
+ if (ret)
+ return ret;
ret = decode_mi_display_flip(s, &info);
if (ret) {
@@ -1494,12 +1534,21 @@ static int cmd_handler_mi_store_data_imm(struct parser_exec_state *s)
int op_size = (cmd_length(s) - 3) * sizeof(u32);
int core_id = (cmd_val(s, 2) & (1 << 0)) ? 1 : 0;
unsigned long gma, gma_low, gma_high;
+ u32 valid_len = CMD_LEN(2);
int ret = 0;
/* check ppggt */
if (!(cmd_val(s, 0) & (1 << 22)))
return 0;
+ /* check if QWORD */
+ if (DWORD_FIELD(0, 21, 21))
+ valid_len++;
+ ret = gvt_check_valid_cmd_length(cmd_length(s),
+ valid_len);
+ if (ret)
+ return ret;
+
gma = cmd_val(s, 2) & GENMASK(31, 2);
if (gmadr_bytes == 8) {
@@ -1542,11 +1591,20 @@ static int cmd_handler_mi_op_2f(struct parser_exec_state *s)
int op_size = (1 << ((cmd_val(s, 0) & GENMASK(20, 19)) >> 19)) *
sizeof(u32);
unsigned long gma, gma_high;
+ u32 valid_len = CMD_LEN(1);
int ret = 0;
if (!(cmd_val(s, 0) & (1 << 22)))
return ret;
+ /* check if QWORD */
+ if (DWORD_FIELD(0, 20, 19) == 1)
+ valid_len += 8;
+ ret = gvt_check_valid_cmd_length(cmd_length(s),
+ valid_len);
+ if (ret)
+ return ret;
+
gma = cmd_val(s, 1) & GENMASK(31, 2);
if (gmadr_bytes == 8) {
gma_high = cmd_val(s, 2) & GENMASK(15, 0);
@@ -1584,6 +1642,16 @@ static int cmd_handler_mi_flush_dw(struct parser_exec_state *s)
bool index_mode = false;
int ret = 0;
u32 hws_pga, val;
+ u32 valid_len = CMD_LEN(2);
+
+ ret = gvt_check_valid_cmd_length(cmd_length(s),
+ valid_len);
+ if (ret) {
+ /* Check again for Qword */
+ ret = gvt_check_valid_cmd_length(cmd_length(s),
+ ++valid_len);
+ return ret;
+ }
/* Check post-sync and ppgtt bit */
if (((cmd_val(s, 0) >> 14) & 0x3) && (cmd_val(s, 1) & (1 << 2))) {
@@ -1661,7 +1729,9 @@ static int batch_buffer_needs_scan(struct parser_exec_state *s)
return 1;
}
-static int find_bb_size(struct parser_exec_state *s, unsigned long *bb_size)
+static int find_bb_size(struct parser_exec_state *s,
+ unsigned long *bb_size,
+ unsigned long *bb_end_cmd_offset)
{
unsigned long gma = 0;
const struct cmd_info *info;
@@ -1673,6 +1743,7 @@ static int find_bb_size(struct parser_exec_state *s, unsigned long *bb_size)
s->vgpu->gtt.ggtt_mm : s->workload->shadow_mm;
*bb_size = 0;
+ *bb_end_cmd_offset = 0;
/* get the start gm address of the batch buffer */
gma = get_gma_bb_from_cmd(s, 1);
@@ -1708,6 +1779,10 @@ static int find_bb_size(struct parser_exec_state *s, unsigned long *bb_size)
/* chained batch buffer */
bb_end = true;
}
+
+ if (bb_end)
+ *bb_end_cmd_offset = *bb_size;
+
cmd_len = get_cmd_length(info, cmd) << 2;
*bb_size += cmd_len;
gma += cmd_len;
@@ -1716,12 +1791,36 @@ static int find_bb_size(struct parser_exec_state *s, unsigned long *bb_size)
return 0;
}
+static int audit_bb_end(struct parser_exec_state *s, void *va)
+{
+ struct intel_vgpu *vgpu = s->vgpu;
+ u32 cmd = *(u32 *)va;
+ const struct cmd_info *info;
+
+ info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id);
+ if (info == NULL) {
+ gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x, addr_type=%s, ring %d, workload=%p\n",
+ cmd, get_opcode(cmd, s->ring_id),
+ (s->buf_addr_type == PPGTT_BUFFER) ?
+ "ppgtt" : "ggtt", s->ring_id, s->workload);
+ return -EBADRQC;
+ }
+
+ if ((info->opcode == OP_MI_BATCH_BUFFER_END) ||
+ ((info->opcode == OP_MI_BATCH_BUFFER_START) &&
+ (BATCH_BUFFER_2ND_LEVEL_BIT(cmd) == 0)))
+ return 0;
+
+ return -EBADRQC;
+}
+
static int perform_bb_shadow(struct parser_exec_state *s)
{
struct intel_vgpu *vgpu = s->vgpu;
struct intel_vgpu_shadow_bb *bb;
unsigned long gma = 0;
unsigned long bb_size;
+ unsigned long bb_end_cmd_offset;
int ret = 0;
struct intel_vgpu_mm *mm = (s->buf_addr_type == GTT_BUFFER) ?
s->vgpu->gtt.ggtt_mm : s->workload->shadow_mm;
@@ -1732,7 +1831,7 @@ static int perform_bb_shadow(struct parser_exec_state *s)
if (gma == INTEL_GVT_INVALID_ADDR)
return -EFAULT;
- ret = find_bb_size(s, &bb_size);
+ ret = find_bb_size(s, &bb_size, &bb_end_cmd_offset);
if (ret)
return ret;
@@ -1788,6 +1887,10 @@ static int perform_bb_shadow(struct parser_exec_state *s)
goto err_unmap;
}
+ ret = audit_bb_end(s, bb->va + start_offset + bb_end_cmd_offset);
+ if (ret)
+ goto err_unmap;
+
INIT_LIST_HEAD(&bb->list);
list_add(&bb->list, &s->workload->shadow_bb);
@@ -1912,21 +2015,24 @@ static const struct cmd_info cmd_info[] = {
{"MI_RS_CONTEXT", OP_MI_RS_CONTEXT, F_LEN_CONST, R_RCS, D_ALL, 0, 1,
NULL},
- {"MI_DISPLAY_FLIP", OP_MI_DISPLAY_FLIP, F_LEN_VAR | F_POST_HANDLE,
+ {"MI_DISPLAY_FLIP", OP_MI_DISPLAY_FLIP, F_LEN_VAR,
R_RCS | R_BCS, D_ALL, 0, 8, cmd_handler_mi_display_flip},
- {"MI_SEMAPHORE_MBOX", OP_MI_SEMAPHORE_MBOX, F_LEN_VAR, R_ALL, D_ALL,
- 0, 8, NULL},
+ {"MI_SEMAPHORE_MBOX", OP_MI_SEMAPHORE_MBOX, F_LEN_VAR | F_LEN_VAR_FIXED,
+ R_ALL, D_ALL, 0, 8, NULL, CMD_LEN(1)},
{"MI_MATH", OP_MI_MATH, F_LEN_VAR, R_ALL, D_ALL, 0, 8, NULL},
- {"MI_URB_CLEAR", OP_MI_URB_CLEAR, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+ {"MI_URB_CLEAR", OP_MI_URB_CLEAR, F_LEN_VAR | F_LEN_VAR_FIXED, R_RCS,
+ D_ALL, 0, 8, NULL, CMD_LEN(0)},
- {"MI_SEMAPHORE_SIGNAL", OP_MI_SEMAPHORE_SIGNAL, F_LEN_VAR, R_ALL,
- D_BDW_PLUS, 0, 8, NULL},
+ {"MI_SEMAPHORE_SIGNAL", OP_MI_SEMAPHORE_SIGNAL,
+ F_LEN_VAR | F_LEN_VAR_FIXED, R_ALL, D_BDW_PLUS, 0, 8,
+ NULL, CMD_LEN(0)},
- {"MI_SEMAPHORE_WAIT", OP_MI_SEMAPHORE_WAIT, F_LEN_VAR, R_ALL,
- D_BDW_PLUS, ADDR_FIX_1(2), 8, cmd_handler_mi_semaphore_wait},
+ {"MI_SEMAPHORE_WAIT", OP_MI_SEMAPHORE_WAIT,
+ F_LEN_VAR | F_LEN_VAR_FIXED, R_ALL, D_BDW_PLUS, ADDR_FIX_1(2),
+ 8, cmd_handler_mi_semaphore_wait, CMD_LEN(2)},
{"MI_STORE_DATA_IMM", OP_MI_STORE_DATA_IMM, F_LEN_VAR, R_ALL, D_BDW_PLUS,
ADDR_FIX_1(1), 10, cmd_handler_mi_store_data_imm},
@@ -1940,8 +2046,9 @@ static const struct cmd_info cmd_info[] = {
{"MI_UPDATE_GTT", OP_MI_UPDATE_GTT, F_LEN_VAR, R_ALL, D_BDW_PLUS, 0, 10,
cmd_handler_mi_update_gtt},
- {"MI_STORE_REGISTER_MEM", OP_MI_STORE_REGISTER_MEM, F_LEN_VAR, R_ALL,
- D_ALL, ADDR_FIX_1(2), 8, cmd_handler_srm},
+ {"MI_STORE_REGISTER_MEM", OP_MI_STORE_REGISTER_MEM,
+ F_LEN_VAR | F_LEN_VAR_FIXED, R_ALL, D_ALL, ADDR_FIX_1(2), 8,
+ cmd_handler_srm, CMD_LEN(2)},
{"MI_FLUSH_DW", OP_MI_FLUSH_DW, F_LEN_VAR, R_ALL, D_ALL, 0, 6,
cmd_handler_mi_flush_dw},
@@ -1949,26 +2056,30 @@ static const struct cmd_info cmd_info[] = {
{"MI_CLFLUSH", OP_MI_CLFLUSH, F_LEN_VAR, R_ALL, D_ALL, ADDR_FIX_1(1),
10, cmd_handler_mi_clflush},
- {"MI_REPORT_PERF_COUNT", OP_MI_REPORT_PERF_COUNT, F_LEN_VAR, R_ALL,
- D_ALL, ADDR_FIX_1(1), 6, cmd_handler_mi_report_perf_count},
+ {"MI_REPORT_PERF_COUNT", OP_MI_REPORT_PERF_COUNT,
+ F_LEN_VAR | F_LEN_VAR_FIXED, R_ALL, D_ALL, ADDR_FIX_1(1), 6,
+ cmd_handler_mi_report_perf_count, CMD_LEN(2)},
- {"MI_LOAD_REGISTER_MEM", OP_MI_LOAD_REGISTER_MEM, F_LEN_VAR, R_ALL,
- D_ALL, ADDR_FIX_1(2), 8, cmd_handler_lrm},
+ {"MI_LOAD_REGISTER_MEM", OP_MI_LOAD_REGISTER_MEM,
+ F_LEN_VAR | F_LEN_VAR_FIXED, R_ALL, D_ALL, ADDR_FIX_1(2), 8,
+ cmd_handler_lrm, CMD_LEN(2)},
- {"MI_LOAD_REGISTER_REG", OP_MI_LOAD_REGISTER_REG, F_LEN_VAR, R_ALL,
- D_ALL, 0, 8, cmd_handler_lrr},
+ {"MI_LOAD_REGISTER_REG", OP_MI_LOAD_REGISTER_REG,
+ F_LEN_VAR | F_LEN_VAR_FIXED, R_ALL, D_ALL, 0, 8,
+ cmd_handler_lrr, CMD_LEN(1)},
- {"MI_RS_STORE_DATA_IMM", OP_MI_RS_STORE_DATA_IMM, F_LEN_VAR, R_RCS,
- D_ALL, 0, 8, NULL},
+ {"MI_RS_STORE_DATA_IMM", OP_MI_RS_STORE_DATA_IMM,
+ F_LEN_VAR | F_LEN_VAR_FIXED, R_RCS, D_ALL, 0,
+ 8, NULL, CMD_LEN(2)},
- {"MI_LOAD_URB_MEM", OP_MI_LOAD_URB_MEM, F_LEN_VAR, R_RCS, D_ALL,
- ADDR_FIX_1(2), 8, NULL},
+ {"MI_LOAD_URB_MEM", OP_MI_LOAD_URB_MEM, F_LEN_VAR | F_LEN_VAR_FIXED,
+ R_RCS, D_ALL, ADDR_FIX_1(2), 8, NULL, CMD_LEN(2)},
{"MI_STORE_URM_MEM", OP_MI_STORE_URM_MEM, F_LEN_VAR, R_RCS, D_ALL,
ADDR_FIX_1(2), 8, NULL},
- {"MI_OP_2E", OP_MI_2E, F_LEN_VAR, R_ALL, D_BDW_PLUS, ADDR_FIX_2(1, 2),
- 8, cmd_handler_mi_op_2e},
+ {"MI_OP_2E", OP_MI_2E, F_LEN_VAR | F_LEN_VAR_FIXED, R_ALL, D_BDW_PLUS,
+ ADDR_FIX_2(1, 2), 8, cmd_handler_mi_op_2e, CMD_LEN(3)},
{"MI_OP_2F", OP_MI_2F, F_LEN_VAR, R_ALL, D_BDW_PLUS, ADDR_FIX_1(1),
8, cmd_handler_mi_op_2f},
@@ -1978,8 +2089,8 @@ static const struct cmd_info cmd_info[] = {
cmd_handler_mi_batch_buffer_start},
{"MI_CONDITIONAL_BATCH_BUFFER_END", OP_MI_CONDITIONAL_BATCH_BUFFER_END,
- F_LEN_VAR, R_ALL, D_ALL, ADDR_FIX_1(2), 8,
- cmd_handler_mi_conditional_batch_buffer_end},
+ F_LEN_VAR | F_LEN_VAR_FIXED, R_ALL, D_ALL, ADDR_FIX_1(2), 8,
+ cmd_handler_mi_conditional_batch_buffer_end, CMD_LEN(2)},
{"MI_LOAD_SCAN_LINES_INCL", OP_MI_LOAD_SCAN_LINES_INCL, F_LEN_CONST,
R_RCS | R_BCS, D_ALL, 0, 2, NULL},
@@ -2569,6 +2680,13 @@ static int cmd_parser_exec(struct parser_exec_state *s)
cmd_length(s), s->buf_type, s->buf_addr_type,
s->workload, info->name);
+ if ((info->flag & F_LEN_MASK) == F_LEN_VAR_FIXED) {
+ ret = gvt_check_valid_cmd_length(cmd_length(s),
+ info->valid_len);
+ if (ret)
+ return ret;
+ }
+
if (info->handler) {
ret = info->handler(s);
if (ret < 0) {
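
[Editor's note: taken together, the cmd_parser.c changes above hang a fixed command length off each cmd_info entry. MI command headers encode the total length in DWords minus a bias of 2, so CMD_LEN(n) recovers the full DWord count; F_LEN_VAR_FIXED marks variable-length opcodes whose length is nevertheless constant in practice, and cmd_parser_exec() now rejects any mismatch before the handler runs. A minimal sketch of the rule, using gen8 MI_STORE_REGISTER_MEM (header, register, two address DWords) as the example:]

#define OP_LENGTH_BIAS	2
#define CMD_LEN(value)	((value) + OP_LENGTH_BIAS)

/*
 * Sketch: MI_STORE_REGISTER_MEM carries a length field of 2, so the
 * command must occupy CMD_LEN(2) == 4 DWords. Anything else is a
 * malformed (or malicious) command and is refused up front.
 */
static int sketch_check_srm(unsigned int total_dwords)
{
	return total_dwords == CMD_LEN(2) ? 0 : -EFAULT;
}
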
diff --git a/drivers/gpu/drm/i915/gvt/debugfs.c b/drivers/gpu/drm/i915/gvt/debugfs.c
index 2fb7b73b260d..285f6011a537 100644
--- a/drivers/gpu/drm/i915/gvt/debugfs.c
+++ b/drivers/gpu/drm/i915/gvt/debugfs.c
@@ -189,36 +189,19 @@ DEFINE_SIMPLE_ATTRIBUTE(vgpu_scan_nonprivbb_fops,
/**
* intel_gvt_debugfs_add_vgpu - register debugfs entries for a vGPU
* @vgpu: a vGPU
- *
- * Returns:
- * Zero on success, negative error code if failed.
*/
-int intel_gvt_debugfs_add_vgpu(struct intel_vgpu *vgpu)
+void intel_gvt_debugfs_add_vgpu(struct intel_vgpu *vgpu)
{
- struct dentry *ent;
char name[16] = "";
snprintf(name, 16, "vgpu%d", vgpu->id);
vgpu->debugfs = debugfs_create_dir(name, vgpu->gvt->debugfs_root);
- if (!vgpu->debugfs)
- return -ENOMEM;
-
- ent = debugfs_create_bool("active", 0444, vgpu->debugfs,
- &vgpu->active);
- if (!ent)
- return -ENOMEM;
-
- ent = debugfs_create_file("mmio_diff", 0444, vgpu->debugfs,
- vgpu, &vgpu_mmio_diff_fops);
- if (!ent)
- return -ENOMEM;
- ent = debugfs_create_file("scan_nonprivbb", 0644, vgpu->debugfs,
- vgpu, &vgpu_scan_nonprivbb_fops);
- if (!ent)
- return -ENOMEM;
-
- return 0;
+ debugfs_create_bool("active", 0444, vgpu->debugfs, &vgpu->active);
+ debugfs_create_file("mmio_diff", 0444, vgpu->debugfs, vgpu,
+ &vgpu_mmio_diff_fops);
+ debugfs_create_file("scan_nonprivbb", 0644, vgpu->debugfs, vgpu,
+ &vgpu_scan_nonprivbb_fops);
}
/**
@@ -234,27 +217,15 @@ void intel_gvt_debugfs_remove_vgpu(struct intel_vgpu *vgpu)
/**
* intel_gvt_debugfs_init - register gvt debugfs root entry
* @gvt: GVT device
- *
- * Returns:
- * zero on success, negative if failed.
*/
-int intel_gvt_debugfs_init(struct intel_gvt *gvt)
+void intel_gvt_debugfs_init(struct intel_gvt *gvt)
{
struct drm_minor *minor = gvt->dev_priv->drm.primary;
- struct dentry *ent;
gvt->debugfs_root = debugfs_create_dir("gvt", minor->debugfs_root);
- if (!gvt->debugfs_root) {
- gvt_err("Cannot create debugfs dir\n");
- return -ENOMEM;
- }
- ent = debugfs_create_ulong("num_tracked_mmio", 0444, gvt->debugfs_root,
- &gvt->mmio.num_tracked_mmio);
- if (!ent)
- return -ENOMEM;
-
- return 0;
+ debugfs_create_ulong("num_tracked_mmio", 0444, gvt->debugfs_root,
+ &gvt->mmio.num_tracked_mmio);
}
/**
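
[Editor's note: the debugfs.c conversion above follows the upstream rule that debugfs registration is best-effort. debugfs_create_file() and friends tolerate a NULL or IS_ERR() parent and simply do nothing, so callers stop checking return values and the add/init helpers become void. A minimal sketch of the resulting caller pattern; the entry names and variables here are hypothetical:]

	struct dentry *root;

	root = debugfs_create_dir("example", NULL);
	/* no error check: a failed dir just makes the children no-ops */
	debugfs_create_bool("active", 0444, root, &active_flag);
	debugfs_create_ulong("counter", 0444, root, &counter);
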
diff --git a/drivers/gpu/drm/i915/gvt/dmabuf.c b/drivers/gpu/drm/i915/gvt/dmabuf.c
index 41c8ebc60c63..13044c027f27 100644
--- a/drivers/gpu/drm/i915/gvt/dmabuf.c
+++ b/drivers/gpu/drm/i915/gvt/dmabuf.c
@@ -491,7 +491,7 @@ int intel_vgpu_get_dmabuf(struct intel_vgpu *vgpu, unsigned int dmabuf_id)
obj->gvt_info = dmabuf_obj->info;
- dmabuf = i915_gem_prime_export(dev, &obj->base, DRM_CLOEXEC | DRM_RDWR);
+ dmabuf = i915_gem_prime_export(&obj->base, DRM_CLOEXEC | DRM_RDWR);
if (IS_ERR(dmabuf)) {
gvt_vgpu_err("export dma-buf failed\n");
ret = PTR_ERR(dmabuf);
diff --git a/drivers/gpu/drm/i915/gvt/gtt.h b/drivers/gpu/drm/i915/gvt/gtt.h
index 42d0394f0de2..88789316807d 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.h
+++ b/drivers/gpu/drm/i915/gvt/gtt.h
@@ -205,17 +205,18 @@ struct intel_vgpu_gtt {
struct intel_vgpu_scratch_pt scratch_pt[GTT_TYPE_MAX];
};
-extern int intel_vgpu_init_gtt(struct intel_vgpu *vgpu);
-extern void intel_vgpu_clean_gtt(struct intel_vgpu *vgpu);
+int intel_vgpu_init_gtt(struct intel_vgpu *vgpu);
+void intel_vgpu_clean_gtt(struct intel_vgpu *vgpu);
void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu, bool invalidate_old);
void intel_vgpu_invalidate_ppgtt(struct intel_vgpu *vgpu);
-extern int intel_gvt_init_gtt(struct intel_gvt *gvt);
+int intel_gvt_init_gtt(struct intel_gvt *gvt);
void intel_vgpu_reset_gtt(struct intel_vgpu *vgpu);
-extern void intel_gvt_clean_gtt(struct intel_gvt *gvt);
+void intel_gvt_clean_gtt(struct intel_gvt *gvt);
-extern struct intel_vgpu_mm *intel_gvt_find_ppgtt_mm(struct intel_vgpu *vgpu,
- int page_table_level, void *root_entry);
+struct intel_vgpu_mm *intel_gvt_find_ppgtt_mm(struct intel_vgpu *vgpu,
+ int page_table_level,
+ void *root_entry);
struct intel_vgpu_oos_page {
struct intel_vgpu_ppgtt_spt *spt;
diff --git a/drivers/gpu/drm/i915/gvt/gvt.c b/drivers/gpu/drm/i915/gvt/gvt.c
index 43f4242062dd..8f37eefa0a02 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.c
+++ b/drivers/gpu/drm/i915/gvt/gvt.c
@@ -375,9 +375,7 @@ int intel_gvt_init_device(struct drm_i915_private *dev_priv)
}
gvt->idle_vgpu = vgpu;
- ret = intel_gvt_debugfs_init(gvt);
- if (ret)
- gvt_err("debugfs registration failed, go on.\n");
+ intel_gvt_debugfs_init(gvt);
gvt_dbg_core("gvt device initialization is done\n");
dev_priv->gvt = gvt;
diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
index 7a1fe44d45af..b47c6acaf9c0 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.h
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -334,6 +334,10 @@ struct intel_gvt {
struct {
struct engine_mmio *mmio;
int ctx_mmio_count[I915_NUM_ENGINES];
+ u32 *tlb_mmio_offset_list;
+ u32 tlb_mmio_offset_list_cnt;
+ u32 *mocs_mmio_offset_list;
+ u32 mocs_mmio_offset_list_cnt;
} engine_mmio_list;
struct dentry *debugfs_root;
@@ -682,9 +686,9 @@ static inline void intel_gvt_mmio_set_in_ctx(
gvt->mmio.mmio_attribute[offset >> 2] |= F_IN_CTX;
}
-int intel_gvt_debugfs_add_vgpu(struct intel_vgpu *vgpu);
+void intel_gvt_debugfs_add_vgpu(struct intel_vgpu *vgpu);
void intel_gvt_debugfs_remove_vgpu(struct intel_vgpu *vgpu);
-int intel_gvt_debugfs_init(struct intel_gvt *gvt);
+void intel_gvt_debugfs_init(struct intel_gvt *gvt);
void intel_gvt_debugfs_clean(struct intel_gvt *gvt);
diff --git a/drivers/gpu/drm/i915/gvt/interrupt.c b/drivers/gpu/drm/i915/gvt/interrupt.c
index 951681813230..11accd3e1023 100644
--- a/drivers/gpu/drm/i915/gvt/interrupt.c
+++ b/drivers/gpu/drm/i915/gvt/interrupt.c
@@ -672,7 +672,7 @@ void intel_gvt_clean_irq(struct intel_gvt *gvt)
hrtimer_cancel(&irq->vblank_timer.timer);
}
-#define VBLNAK_TIMER_PERIOD 16000000
+#define VBLANK_TIMER_PERIOD 16000000
/**
* intel_gvt_init_irq - initialize GVT-g IRQ emulation subsystem
@@ -704,7 +704,7 @@ int intel_gvt_init_irq(struct intel_gvt *gvt)
hrtimer_init(&vblank_timer->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
vblank_timer->timer.function = vblank_timer_fn;
- vblank_timer->period = VBLNAK_TIMER_PERIOD;
+ vblank_timer->period = VBLANK_TIMER_PERIOD;
return 0;
}
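
[Editor's note on the constant renamed above: the hrtimer period is in nanoseconds, so 16,000,000 ns is 16 ms, i.e. an emulated vblank of 1e9 / 16e6 = 62.5 Hz, a rough stand-in for a 60 Hz display:]

#define VBLANK_TIMER_PERIOD 16000000	/* ns: 16 ms => ~62.5 Hz vblank */
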
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index 23aa3e50cbf8..343d79c1cb7e 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -1306,7 +1306,6 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
unsigned int i;
int ret;
struct vfio_region_info_cap_sparse_mmap *sparse = NULL;
- size_t size;
int nr_areas = 1;
int cap_type_id;
@@ -1349,9 +1348,8 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
VFIO_REGION_INFO_FLAG_WRITE;
info.size = gvt_aperture_sz(vgpu->gvt);
- size = sizeof(*sparse) +
- (nr_areas * sizeof(*sparse->areas));
- sparse = kzalloc(size, GFP_KERNEL);
+ sparse = kzalloc(struct_size(sparse, areas, nr_areas),
+ GFP_KERNEL);
if (!sparse)
return -ENOMEM;
@@ -1416,9 +1414,9 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
switch (cap_type_id) {
case VFIO_REGION_INFO_CAP_SPARSE_MMAP:
ret = vfio_info_add_capability(&caps,
- &sparse->header, sizeof(*sparse) +
- (sparse->nr_areas *
- sizeof(*sparse->areas)));
+ &sparse->header,
+ struct_size(sparse, areas,
+ sparse->nr_areas));
if (ret) {
kfree(sparse);
return ret;
@@ -1798,9 +1796,6 @@ static int kvmgt_guest_init(struct mdev_device *mdev)
"kvmgt_nr_cache_entries",
0444, vgpu->debugfs,
&vgpu->vdev.nr_cache_entries);
- if (!info->debugfs_cache_entries)
- gvt_vgpu_err("Cannot create kvmgt debugfs entry\n");
-
return 0;
}
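
[Editor's note: the kvmgt.c hunks above switch two open-coded size computations to struct_size(), which for a struct ending in a flexible array member evaluates sizeof(*p) + n * sizeof(*p->member) with overflow checking (saturating to SIZE_MAX rather than wrapping). A minimal sketch of the idiom:]

	struct vfio_region_info_cap_sparse_mmap *sparse;

	/* sizeof(*sparse) + nr_areas * sizeof(*sparse->areas), checked */
	sparse = kzalloc(struct_size(sparse, areas, nr_areas), GFP_KERNEL);
	if (!sparse)
		return -ENOMEM;
	sparse->nr_areas = nr_areas;
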
diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.c b/drivers/gpu/drm/i915/gvt/mmio_context.c
index 2998999e8568..4208e40445b1 100644
--- a/drivers/gpu/drm/i915/gvt/mmio_context.c
+++ b/drivers/gpu/drm/i915/gvt/mmio_context.c
@@ -148,19 +148,27 @@ static struct {
u32 l3cc_table[GEN9_MOCS_SIZE / 2];
} gen9_render_mocs;
+static u32 gen9_mocs_mmio_offset_list[] = {
+ [RCS0] = 0xc800,
+ [VCS0] = 0xc900,
+ [VCS1] = 0xca00,
+ [BCS0] = 0xcc00,
+ [VECS0] = 0xcb00,
+};
+
static void load_render_mocs(struct drm_i915_private *dev_priv)
{
+ struct intel_gvt *gvt = dev_priv->gvt;
i915_reg_t offset;
- u32 regs[] = {
- [RCS0] = 0xc800,
- [VCS0] = 0xc900,
- [VCS1] = 0xca00,
- [BCS0] = 0xcc00,
- [VECS0] = 0xcb00,
- };
+ u32 cnt = gvt->engine_mmio_list.mocs_mmio_offset_list_cnt;
+ u32 *regs = gvt->engine_mmio_list.mocs_mmio_offset_list;
int ring_id, i;
- for (ring_id = 0; ring_id < ARRAY_SIZE(regs); ring_id++) {
+ /* Platform doesn't have mocs mmios. */
+ if (!regs)
+ return;
+
+ for (ring_id = 0; ring_id < cnt; ring_id++) {
if (!HAS_ENGINE(dev_priv, ring_id))
continue;
offset.reg = regs[ring_id];
@@ -327,22 +335,28 @@ out:
return ret;
}
+static u32 gen8_tlb_mmio_offset_list[] = {
+ [RCS0] = 0x4260,
+ [VCS0] = 0x4264,
+ [VCS1] = 0x4268,
+ [BCS0] = 0x426c,
+ [VECS0] = 0x4270,
+};
+
static void handle_tlb_pending_event(struct intel_vgpu *vgpu, int ring_id)
{
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
struct intel_uncore *uncore = &dev_priv->uncore;
struct intel_vgpu_submission *s = &vgpu->submission;
+ u32 *regs = vgpu->gvt->engine_mmio_list.tlb_mmio_offset_list;
+ u32 cnt = vgpu->gvt->engine_mmio_list.tlb_mmio_offset_list_cnt;
enum forcewake_domains fw;
i915_reg_t reg;
- u32 regs[] = {
- [RCS0] = 0x4260,
- [VCS0] = 0x4264,
- [VCS1] = 0x4268,
- [BCS0] = 0x426c,
- [VECS0] = 0x4270,
- };
- if (WARN_ON(ring_id >= ARRAY_SIZE(regs)))
+ if (!regs)
+ return;
+
+ if (WARN_ON(ring_id >= cnt))
return;
if (!test_and_clear_bit(ring_id, (void *)s->tlb_handle_pending))
@@ -565,10 +579,17 @@ void intel_gvt_init_engine_mmio_context(struct intel_gvt *gvt)
{
struct engine_mmio *mmio;
- if (INTEL_GEN(gvt->dev_priv) >= 9)
+ if (INTEL_GEN(gvt->dev_priv) >= 9) {
gvt->engine_mmio_list.mmio = gen9_engine_mmio_list;
- else
+ gvt->engine_mmio_list.tlb_mmio_offset_list = gen8_tlb_mmio_offset_list;
+ gvt->engine_mmio_list.tlb_mmio_offset_list_cnt = ARRAY_SIZE(gen8_tlb_mmio_offset_list);
+ gvt->engine_mmio_list.mocs_mmio_offset_list = gen9_mocs_mmio_offset_list;
+ gvt->engine_mmio_list.mocs_mmio_offset_list_cnt = ARRAY_SIZE(gen9_mocs_mmio_offset_list);
+ } else {
gvt->engine_mmio_list.mmio = gen8_engine_mmio_list;
+ gvt->engine_mmio_list.tlb_mmio_offset_list = gen8_tlb_mmio_offset_list;
+ gvt->engine_mmio_list.tlb_mmio_offset_list_cnt = ARRAY_SIZE(gen8_tlb_mmio_offset_list);
+ }
for (mmio = gvt->engine_mmio_list.mmio;
i915_mmio_reg_valid(mmio->reg); mmio++) {
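
[Editor's note: the mmio_context.c rework above lifts the per-engine register offsets out of function-local arrays into per-platform lists hung off struct intel_gvt, so a platform that lacks a register family can simply leave the pointer NULL. A minimal sketch of the consumer side, condensed from the hunks above:]

	u32 *regs = gvt->engine_mmio_list.tlb_mmio_offset_list;
	u32 cnt = gvt->engine_mmio_list.tlb_mmio_offset_list_cnt;
	i915_reg_t reg;

	if (!regs)			/* platform has no such MMIOs */
		return;
	if (WARN_ON(ring_id >= cnt))	/* list is sized per platform */
		return;
	reg = _MMIO(regs[ring_id]);
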
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index 75baff657e43..6c79d16b381e 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -84,8 +84,8 @@ static void sr_oa_regs(struct intel_vgpu_workload *workload,
u32 *reg_state, bool save)
{
struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
- u32 ctx_oactxctrl = dev_priv->perf.oa.ctx_oactxctrl_offset;
- u32 ctx_flexeu0 = dev_priv->perf.oa.ctx_flexeu0_offset;
+ u32 ctx_oactxctrl = dev_priv->perf.ctx_oactxctrl_offset;
+ u32 ctx_flexeu0 = dev_priv->perf.ctx_flexeu0_offset;
int i = 0;
u32 flex_mmio[] = {
i915_mmio_reg_offset(EU_PERF_CNTL0),
@@ -291,9 +291,6 @@ shadow_context_descriptor_update(struct intel_context *ce,
* Update bits 0-11 of the context descriptor which includes flags
* like GEN8_CTX_* cached in desc_template
*/
- desc &= U64_MAX << 12;
- desc |= ce->gem_context->desc_template & ((1ULL << 12) - 1);
-
desc &= ~(0x3 << GEN8_CTX_ADDRESSING_MODE_SHIFT);
desc |= workload->ctx_desc.addressing_mode <<
GEN8_CTX_ADDRESSING_MODE_SHIFT;
@@ -571,6 +568,16 @@ static int prepare_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
return 0;
}
+static void update_vreg_in_ctx(struct intel_vgpu_workload *workload)
+{
+ struct intel_vgpu *vgpu = workload->vgpu;
+ struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+ u32 ring_base;
+
+ ring_base = dev_priv->engine[workload->ring_id]->mmio_base;
+ vgpu_vreg_t(vgpu, RING_START(ring_base)) = workload->rb_start;
+}
+
static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload)
{
struct intel_vgpu *vgpu = workload->vgpu;
@@ -1019,6 +1026,13 @@ static int workload_thread(void *priv)
if (need_force_wake)
intel_uncore_forcewake_get(&gvt->dev_priv->uncore,
FORCEWAKE_ALL);
+ /*
+ * Update the vReg of the vGPU which submitted this
+ * workload. The vGPU may use these registers for checking
+ * the context state. The value comes from GPU commands
+ * in this workload.
+ */
+ update_vreg_in_ctx(workload);
ret = dispatch_workload(workload);
@@ -1157,7 +1171,7 @@ void intel_vgpu_clean_submission(struct intel_vgpu *vgpu)
intel_vgpu_select_submission_ops(vgpu, ALL_ENGINES, 0);
- i915_context_ppgtt_root_restore(s, i915_vm_to_ppgtt(s->shadow[0]->gem_context->vm));
+ i915_context_ppgtt_root_restore(s, i915_vm_to_ppgtt(s->shadow[0]->vm));
for_each_engine(engine, vgpu->gvt->dev_priv, id)
intel_context_unpin(s->shadow[id]);
@@ -1215,30 +1229,43 @@ i915_context_ppgtt_root_save(struct intel_vgpu_submission *s,
*/
int intel_vgpu_setup_submission(struct intel_vgpu *vgpu)
{
+ struct drm_i915_private *i915 = vgpu->gvt->dev_priv;
struct intel_vgpu_submission *s = &vgpu->submission;
struct intel_engine_cs *engine;
struct i915_gem_context *ctx;
enum intel_engine_id i;
int ret;
- ctx = i915_gem_context_create_gvt(&vgpu->gvt->dev_priv->drm);
- if (IS_ERR(ctx))
- return PTR_ERR(ctx);
+ mutex_lock(&i915->drm.struct_mutex);
+
+ ctx = i915_gem_context_create_kernel(i915, I915_PRIORITY_MAX);
+ if (IS_ERR(ctx)) {
+ ret = PTR_ERR(ctx);
+ goto out_unlock;
+ }
+
+ i915_gem_context_set_force_single_submission(ctx);
i915_context_ppgtt_root_save(s, i915_vm_to_ppgtt(ctx->vm));
- for_each_engine(engine, vgpu->gvt->dev_priv, i) {
+ for_each_engine(engine, i915, i) {
struct intel_context *ce;
INIT_LIST_HEAD(&s->workload_q_head[i]);
s->shadow[i] = ERR_PTR(-EINVAL);
- ce = i915_gem_context_get_engine(ctx, i);
+ ce = intel_context_create(ctx, engine);
if (IS_ERR(ce)) {
ret = PTR_ERR(ce);
goto out_shadow_ctx;
}
+ if (!USES_GUC_SUBMISSION(i915)) { /* Max ring buffer size */
+ const unsigned int ring_size = 512 * SZ_4K;
+
+ ce->ring = __intel_context_ring_size(ring_size);
+ }
+
ret = intel_context_pin(ce);
intel_context_put(ce);
if (ret)
@@ -1265,17 +1292,21 @@ int intel_vgpu_setup_submission(struct intel_vgpu *vgpu)
bitmap_zero(s->tlb_handle_pending, I915_NUM_ENGINES);
i915_gem_context_put(ctx);
+ mutex_unlock(&i915->drm.struct_mutex);
return 0;
out_shadow_ctx:
i915_context_ppgtt_root_restore(s, i915_vm_to_ppgtt(ctx->vm));
- for_each_engine(engine, vgpu->gvt->dev_priv, i) {
+ for_each_engine(engine, i915, i) {
if (IS_ERR(s->shadow[i]))
break;
intel_context_unpin(s->shadow[i]);
+ intel_context_put(s->shadow[i]);
}
i915_gem_context_put(ctx);
+out_unlock:
+ mutex_unlock(&i915->drm.struct_mutex);
return ret;
}
@@ -1424,9 +1455,6 @@ static int prepare_mm(struct intel_vgpu_workload *workload)
#define same_context(a, b) (((a)->context_id == (b)->context_id) && \
((a)->lrca == (b)->lrca))
-#define get_last_workload(q) \
- (list_empty(q) ? NULL : container_of(q->prev, \
- struct intel_vgpu_workload, list))
/**
* intel_vgpu_create_workload - create a vGPU workload
* @vgpu: a vGPU
@@ -1446,7 +1474,7 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
{
struct intel_vgpu_submission *s = &vgpu->submission;
struct list_head *q = workload_q_head(vgpu, ring_id);
- struct intel_vgpu_workload *last_workload = get_last_workload(q);
+ struct intel_vgpu_workload *last_workload = NULL;
struct intel_vgpu_workload *workload = NULL;
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
u64 ring_context_gpa;
@@ -1472,15 +1500,20 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
head &= RB_HEAD_OFF_MASK;
tail &= RB_TAIL_OFF_MASK;
- if (last_workload && same_context(&last_workload->ctx_desc, desc)) {
- gvt_dbg_el("ring id %d cur workload == last\n", ring_id);
- gvt_dbg_el("ctx head %x real head %lx\n", head,
- last_workload->rb_tail);
- /*
- * cannot use guest context head pointer here,
- * as it might not be updated at this time
- */
- head = last_workload->rb_tail;
+ list_for_each_entry_reverse(last_workload, q, list) {
+
+ if (same_context(&last_workload->ctx_desc, desc)) {
+ gvt_dbg_el("ring id %d cur workload == last\n",
+ ring_id);
+ gvt_dbg_el("ctx head %x real head %lx\n", head,
+ last_workload->rb_tail);
+ /*
+ * cannot use guest context head pointer here,
+ * as it might not be updated at this time
+ */
+ head = last_workload->rb_tail;
+ break;
+ }
}
gvt_dbg_el("ring id %d begin a new workload\n", ring_id);
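
[Editor's note: the workload-creation hunk above replaces the old get_last_workload(), which only inspected the tail of the queue, with a reverse walk. list_for_each_entry_reverse() visits the queue newest-to-oldest, so the head fix-up now keys off the most recent workload of the same context even when workloads from other contexts were queued after it. A condensed sketch of the logic:]

	/* newest-to-oldest: first match is the latest same-context one */
	list_for_each_entry_reverse(last_workload, q, list) {
		if (same_context(&last_workload->ctx_desc, desc)) {
			/* guest head may be stale; trust our own tail */
			head = last_workload->rb_tail;
			break;
		}
	}
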
diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c
index 44ce3c2b9ac1..d5a6e4e3d0fd 100644
--- a/drivers/gpu/drm/i915/gvt/vgpu.c
+++ b/drivers/gpu/drm/i915/gvt/vgpu.c
@@ -420,9 +420,7 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
if (ret)
goto out_clean_submission;
- ret = intel_gvt_debugfs_add_vgpu(vgpu);
- if (ret)
- goto out_clean_sched_policy;
+ intel_gvt_debugfs_add_vgpu(vgpu);
ret = intel_gvt_hypervisor_set_opregion(vgpu);
if (ret)
diff --git a/drivers/gpu/drm/i915/i915_active.c b/drivers/gpu/drm/i915/i915_active.c
index 293e5bcc4b6c..48e16ad93bbd 100644
--- a/drivers/gpu/drm/i915/i915_active.c
+++ b/drivers/gpu/drm/i915/i915_active.c
@@ -4,6 +4,8 @@
* Copyright © 2019 Intel Corporation
*/
+#include <linux/debugobjects.h>
+
#include "gt/intel_engine_pm.h"
#include "i915_drv.h"
@@ -31,49 +33,149 @@ struct active_node {
u64 timeline;
};
-static void
-__active_park(struct i915_active *ref)
+static inline struct active_node *
+node_from_active(struct i915_active_request *active)
{
- struct active_node *it, *n;
+ return container_of(active, struct active_node, base);
+}
- rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
- GEM_BUG_ON(i915_active_request_isset(&it->base));
- kmem_cache_free(global.slab_cache, it);
- }
- ref->tree = RB_ROOT;
+#define take_preallocated_barriers(x) llist_del_all(&(x)->preallocated_barriers)
+
+static inline bool is_barrier(const struct i915_active_request *active)
+{
+ return IS_ERR(rcu_access_pointer(active->request));
+}
+
+static inline struct llist_node *barrier_to_ll(struct active_node *node)
+{
+ GEM_BUG_ON(!is_barrier(&node->base));
+ return (struct llist_node *)&node->base.link;
+}
+
+static inline struct intel_engine_cs *
+__barrier_to_engine(struct active_node *node)
+{
+ return (struct intel_engine_cs *)READ_ONCE(node->base.link.prev);
+}
+
+static inline struct intel_engine_cs *
+barrier_to_engine(struct active_node *node)
+{
+ GEM_BUG_ON(!is_barrier(&node->base));
+ return __barrier_to_engine(node);
+}
+
+static inline struct active_node *barrier_from_ll(struct llist_node *x)
+{
+ return container_of((struct list_head *)x,
+ struct active_node, base.link);
+}
+
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM) && IS_ENABLED(CONFIG_DEBUG_OBJECTS)
+
+static void *active_debug_hint(void *addr)
+{
+ struct i915_active *ref = addr;
+
+ return (void *)ref->active ?: (void *)ref->retire ?: (void *)ref;
+}
+
+static struct debug_obj_descr active_debug_desc = {
+ .name = "i915_active",
+ .debug_hint = active_debug_hint,
+};
+
+static void debug_active_init(struct i915_active *ref)
+{
+ debug_object_init(ref, &active_debug_desc);
+}
+
+static void debug_active_activate(struct i915_active *ref)
+{
+ debug_object_activate(ref, &active_debug_desc);
+}
+
+static void debug_active_deactivate(struct i915_active *ref)
+{
+ debug_object_deactivate(ref, &active_debug_desc);
}
+static void debug_active_fini(struct i915_active *ref)
+{
+ debug_object_free(ref, &active_debug_desc);
+}
+
+static void debug_active_assert(struct i915_active *ref)
+{
+ debug_object_assert_init(ref, &active_debug_desc);
+}
+
+#else
+
+static inline void debug_active_init(struct i915_active *ref) { }
+static inline void debug_active_activate(struct i915_active *ref) { }
+static inline void debug_active_deactivate(struct i915_active *ref) { }
+static inline void debug_active_fini(struct i915_active *ref) { }
+static inline void debug_active_assert(struct i915_active *ref) { }
+
+#endif
+
static void
__active_retire(struct i915_active *ref)
{
- GEM_BUG_ON(!ref->count);
- if (--ref->count)
+ struct active_node *it, *n;
+ struct rb_root root;
+ bool retire = false;
+
+ lockdep_assert_held(&ref->mutex);
+
+ /* return the unused nodes to our slabcache -- flushing the allocator */
+ if (atomic_dec_and_test(&ref->count)) {
+ debug_active_deactivate(ref);
+ root = ref->tree;
+ ref->tree = RB_ROOT;
+ ref->cache = NULL;
+ retire = true;
+ }
+
+ mutex_unlock(&ref->mutex);
+ if (!retire)
return;
- /* return the unused nodes to our slabcache */
- __active_park(ref);
+ rbtree_postorder_for_each_entry_safe(it, n, &root, node) {
+ GEM_BUG_ON(i915_active_request_isset(&it->base));
+ kmem_cache_free(global.slab_cache, it);
+ }
- ref->retire(ref);
+ /* After the final retire, the entire struct may be freed */
+ if (ref->retire)
+ ref->retire(ref);
}
static void
-node_retire(struct i915_active_request *base, struct i915_request *rq)
+active_retire(struct i915_active *ref)
{
- __active_retire(container_of(base, struct active_node, base)->ref);
+ GEM_BUG_ON(!atomic_read(&ref->count));
+ if (atomic_add_unless(&ref->count, -1, 1))
+ return;
+
+ /* One active may be flushed from inside the acquire of another */
+ mutex_lock_nested(&ref->mutex, SINGLE_DEPTH_NESTING);
+ __active_retire(ref);
}
static void
-last_retire(struct i915_active_request *base, struct i915_request *rq)
+node_retire(struct i915_active_request *base, struct i915_request *rq)
{
- __active_retire(container_of(base, struct i915_active, last));
+ active_retire(node_from_active(base)->ref);
}
static struct i915_active_request *
-active_instance(struct i915_active *ref, u64 idx)
+active_instance(struct i915_active *ref, struct intel_timeline *tl)
{
- struct active_node *node;
+ struct active_node *node, *prealloc;
struct rb_node **p, *parent;
- struct i915_request *old;
+ u64 idx = tl->fence_context;
/*
* We track the most recently used timeline to skip a rbtree search
@@ -81,20 +183,18 @@ active_instance(struct i915_active *ref, u64 idx)
* at all. We can reuse the last slot if it is empty, that is
* after the previous activity has been retired, or if it matches the
* current timeline.
- *
- * Note that we allow the timeline to be active simultaneously in
- * the rbtree and the last cache. We do this to avoid having
- * to search and replace the rbtree element for a new timeline, with
- * the cost being that we must be aware that the ref may be retired
- * twice for the same timeline (as the older rbtree element will be
- * retired before the new request added to last).
*/
- old = i915_active_request_raw(&ref->last, BKL(ref));
- if (!old || old->fence.context == idx)
- goto out;
+ node = READ_ONCE(ref->cache);
+ if (node && node->timeline == idx)
+ return &node->base;
- /* Move the currently active fence into the rbtree */
- idx = old->fence.context;
+ /* Preallocate a replacement, just in case */
+ prealloc = kmem_cache_alloc(global.slab_cache, GFP_KERNEL);
+ if (!prealloc)
+ return NULL;
+
+ mutex_lock(&ref->mutex);
+ GEM_BUG_ON(i915_active_is_idle(ref));
parent = NULL;
p = &ref->tree.rb_node;
@@ -102,8 +202,10 @@ active_instance(struct i915_active *ref, u64 idx)
parent = *p;
node = rb_entry(parent, struct active_node, node);
- if (node->timeline == idx)
- goto replace;
+ if (node->timeline == idx) {
+ kmem_cache_free(global.slab_cache, prealloc);
+ goto out;
+ }
if (node->timeline < idx)
p = &parent->rb_right;
@@ -111,117 +213,230 @@ active_instance(struct i915_active *ref, u64 idx)
p = &parent->rb_left;
}
- node = kmem_cache_alloc(global.slab_cache, GFP_KERNEL);
-
- /* kmalloc may retire the ref->last (thanks shrinker)! */
- if (unlikely(!i915_active_request_raw(&ref->last, BKL(ref)))) {
- kmem_cache_free(global.slab_cache, node);
- goto out;
- }
-
- if (unlikely(!node))
- return ERR_PTR(-ENOMEM);
-
- i915_active_request_init(&node->base, NULL, node_retire);
+ node = prealloc;
+ i915_active_request_init(&node->base, &tl->mutex, NULL, node_retire);
node->ref = ref;
node->timeline = idx;
rb_link_node(&node->node, parent, p);
rb_insert_color(&node->node, &ref->tree);
-replace:
- /*
- * Overwrite the previous active slot in the rbtree with last,
- * leaving last zeroed. If the previous slot is still active,
- * we must be careful as we now only expect to receive one retire
- * callback not two, and so much undo the active counting for the
- * overwritten slot.
- */
- if (i915_active_request_isset(&node->base)) {
- /* Retire ourselves from the old rq->active_list */
- __list_del_entry(&node->base.link);
- ref->count--;
- GEM_BUG_ON(!ref->count);
- }
- GEM_BUG_ON(list_empty(&ref->last.link));
- list_replace_init(&ref->last.link, &node->base.link);
- node->base.request = fetch_and_zero(&ref->last.request);
-
out:
- return &ref->last;
+ ref->cache = node;
+ mutex_unlock(&ref->mutex);
+
+ BUILD_BUG_ON(offsetof(typeof(*node), base));
+ return &node->base;
}
-void i915_active_init(struct drm_i915_private *i915,
- struct i915_active *ref,
- void (*retire)(struct i915_active *ref))
+void __i915_active_init(struct drm_i915_private *i915,
+ struct i915_active *ref,
+ int (*active)(struct i915_active *ref),
+ void (*retire)(struct i915_active *ref),
+ struct lock_class_key *key)
{
+ debug_active_init(ref);
+
ref->i915 = i915;
+ ref->flags = 0;
+ ref->active = active;
ref->retire = retire;
ref->tree = RB_ROOT;
- i915_active_request_init(&ref->last, NULL, last_retire);
- init_llist_head(&ref->barriers);
- ref->count = 0;
+ ref->cache = NULL;
+ init_llist_head(&ref->preallocated_barriers);
+ atomic_set(&ref->count, 0);
+ __mutex_init(&ref->mutex, "i915_active", key);
+}
+
+static bool ____active_del_barrier(struct i915_active *ref,
+ struct active_node *node,
+ struct intel_engine_cs *engine)
+
+{
+ struct llist_node *head = NULL, *tail = NULL;
+ struct llist_node *pos, *next;
+
+ GEM_BUG_ON(node->timeline != engine->kernel_context->timeline->fence_context);
+
+ /*
+ * Rebuild the llist excluding our node. We may perform this
+ * outside of the kernel_context timeline mutex and so someone
+ * else may be manipulating the engine->barrier_tasks, in
+ * which case either we or they will be upset :)
+ *
+ * A second __active_del_barrier() will report failure to claim
+ * the active_node and the caller will just shrug and know not to
+ * claim ownership of its node.
+ *
+ * A concurrent i915_request_add_active_barriers() will miss adding
+ * any of the tasks, but we will try again on the next -- and since
+ * we are actively using the barrier, we know that there will be
+ * at least another opportunity when we idle.
+ */
+ llist_for_each_safe(pos, next, llist_del_all(&engine->barrier_tasks)) {
+ if (node == barrier_from_ll(pos)) {
+ node = NULL;
+ continue;
+ }
+
+ pos->next = head;
+ head = pos;
+ if (!tail)
+ tail = pos;
+ }
+ if (head)
+ llist_add_batch(head, tail, &engine->barrier_tasks);
+
+ return !node;
+}
+
+static bool
+__active_del_barrier(struct i915_active *ref, struct active_node *node)
+{
+ return ____active_del_barrier(ref, node, barrier_to_engine(node));
}
int i915_active_ref(struct i915_active *ref,
- u64 timeline,
+ struct intel_timeline *tl,
struct i915_request *rq)
{
struct i915_active_request *active;
- int err = 0;
+ int err;
+
+ lockdep_assert_held(&tl->mutex);
/* Prevent reaping in case we malloc/wait while building the tree */
- i915_active_acquire(ref);
+ err = i915_active_acquire(ref);
+ if (err)
+ return err;
- active = active_instance(ref, timeline);
- if (IS_ERR(active)) {
- err = PTR_ERR(active);
+ active = active_instance(ref, tl);
+ if (!active) {
+ err = -ENOMEM;
goto out;
}
- if (!i915_active_request_isset(active))
- ref->count++;
+ if (is_barrier(active)) { /* proto-node used by our idle barrier */
+ /*
+ * This request is on the kernel_context timeline, and so
+ * we can use it to substitute for the pending idle-barrier
+ * request that we want to emit on the kernel_context.
+ */
+ __active_del_barrier(ref, node_from_active(active));
+ RCU_INIT_POINTER(active->request, NULL);
+ INIT_LIST_HEAD(&active->link);
+ } else {
+ if (!i915_active_request_isset(active))
+ atomic_inc(&ref->count);
+ }
+ GEM_BUG_ON(!atomic_read(&ref->count));
__i915_active_request_set(active, rq);
- GEM_BUG_ON(!ref->count);
out:
i915_active_release(ref);
return err;
}
-bool i915_active_acquire(struct i915_active *ref)
+int i915_active_acquire(struct i915_active *ref)
{
- lockdep_assert_held(BKL(ref));
- return !ref->count++;
+ int err;
+
+ debug_active_assert(ref);
+ if (atomic_add_unless(&ref->count, 1, 0))
+ return 0;
+
+ err = mutex_lock_interruptible(&ref->mutex);
+ if (err)
+ return err;
+
+ if (!atomic_read(&ref->count) && ref->active)
+ err = ref->active(ref);
+ if (!err) {
+ debug_active_activate(ref);
+ atomic_inc(&ref->count);
+ }
+
+ mutex_unlock(&ref->mutex);
+
+ return err;
}
void i915_active_release(struct i915_active *ref)
{
- lockdep_assert_held(BKL(ref));
- __active_retire(ref);
+ debug_active_assert(ref);
+ active_retire(ref);
+}
+
+static void __active_ungrab(struct i915_active *ref)
+{
+ clear_and_wake_up_bit(I915_ACTIVE_GRAB_BIT, &ref->flags);
+}
+
+bool i915_active_trygrab(struct i915_active *ref)
+{
+ debug_active_assert(ref);
+
+ if (test_and_set_bit(I915_ACTIVE_GRAB_BIT, &ref->flags))
+ return false;
+
+ if (!atomic_add_unless(&ref->count, 1, 0)) {
+ __active_ungrab(ref);
+ return false;
+ }
+
+ return true;
+}
+
+void i915_active_ungrab(struct i915_active *ref)
+{
+ GEM_BUG_ON(!test_bit(I915_ACTIVE_GRAB_BIT, &ref->flags));
+
+ active_retire(ref);
+ __active_ungrab(ref);
}
int i915_active_wait(struct i915_active *ref)
{
struct active_node *it, *n;
- int ret = 0;
+ int err;
+
+ might_sleep();
+ might_lock(&ref->mutex);
- if (i915_active_acquire(ref))
- goto out_release;
+ if (i915_active_is_idle(ref))
+ return 0;
+
+ err = mutex_lock_interruptible(&ref->mutex);
+ if (err)
+ return err;
- ret = i915_active_request_retire(&ref->last, BKL(ref));
- if (ret)
- goto out_release;
+ if (!atomic_add_unless(&ref->count, 1, 0)) {
+ mutex_unlock(&ref->mutex);
+ return 0;
+ }
rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
- ret = i915_active_request_retire(&it->base, BKL(ref));
- if (ret)
+ if (is_barrier(&it->base)) { /* unconnected idle-barrier */
+ err = -EBUSY;
+ break;
+ }
+
+ err = i915_active_request_retire(&it->base, BKL(ref));
+ if (err)
break;
}
-out_release:
- i915_active_release(ref);
- return ret;
+ __active_retire(ref);
+ if (err)
+ return err;
+
+ if (wait_on_bit(&ref->flags, I915_ACTIVE_GRAB_BIT, TASK_KILLABLE))
+ return -EINTR;
+
+ if (!i915_active_is_idle(ref))
+ return -EBUSY;
+
+ return 0;
}
int i915_request_await_active_request(struct i915_request *rq,
@@ -236,23 +451,24 @@ int i915_request_await_active_request(struct i915_request *rq,
int i915_request_await_active(struct i915_request *rq, struct i915_active *ref)
{
struct active_node *it, *n;
- int err = 0;
+ int err;
- /* await allocates and so we need to avoid hitting the shrinker */
- if (i915_active_acquire(ref))
- goto out; /* was idle */
+ if (RB_EMPTY_ROOT(&ref->tree))
+ return 0;
- err = i915_request_await_active_request(rq, &ref->last);
+ /* await allocates and so we need to avoid hitting the shrinker */
+ err = i915_active_acquire(ref);
if (err)
- goto out;
+ return err;
+ mutex_lock(&ref->mutex);
rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
err = i915_request_await_active_request(rq, &it->base);
if (err)
- goto out;
+ break;
}
+ mutex_unlock(&ref->mutex);
-out:
i915_active_release(ref);
return err;
}
@@ -260,53 +476,170 @@ out:
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
void i915_active_fini(struct i915_active *ref)
{
- GEM_BUG_ON(i915_active_request_isset(&ref->last));
+ debug_active_fini(ref);
GEM_BUG_ON(!RB_EMPTY_ROOT(&ref->tree));
- GEM_BUG_ON(ref->count);
+ GEM_BUG_ON(atomic_read(&ref->count));
+ mutex_destroy(&ref->mutex);
}
#endif
+static inline bool is_idle_barrier(struct active_node *node, u64 idx)
+{
+ return node->timeline == idx && !i915_active_request_isset(&node->base);
+}
+
+static struct active_node *reuse_idle_barrier(struct i915_active *ref, u64 idx)
+{
+ struct rb_node *prev, *p;
+
+ if (RB_EMPTY_ROOT(&ref->tree))
+ return NULL;
+
+ mutex_lock(&ref->mutex);
+ GEM_BUG_ON(i915_active_is_idle(ref));
+
+ /*
+ * Try to reuse any existing barrier nodes already allocated for this
+ * i915_active, due to overlapping active phases there is likely a
+ * node kept alive (as we reuse before parking). We prefer to reuse
+ * completely idle barriers (less hassle in manipulating the llists),
+ * but otherwise any will do.
+ */
+ if (ref->cache && is_idle_barrier(ref->cache, idx)) {
+ p = &ref->cache->node;
+ goto match;
+ }
+
+ prev = NULL;
+ p = ref->tree.rb_node;
+ while (p) {
+ struct active_node *node =
+ rb_entry(p, struct active_node, node);
+
+ if (is_idle_barrier(node, idx))
+ goto match;
+
+ prev = p;
+ if (node->timeline < idx)
+ p = p->rb_right;
+ else
+ p = p->rb_left;
+ }
+
+ /*
+ * No quick match, but we did find the leftmost rb_node for the
+ * kernel_context. Walk the rb_tree in-order to see if there were
+ * any idle-barriers on this timeline that we missed, or just use
+ * the first pending barrier.
+ */
+ for (p = prev; p; p = rb_next(p)) {
+ struct active_node *node =
+ rb_entry(p, struct active_node, node);
+ struct intel_engine_cs *engine;
+
+ if (node->timeline > idx)
+ break;
+
+ if (node->timeline < idx)
+ continue;
+
+ if (is_idle_barrier(node, idx))
+ goto match;
+
+ /*
+ * The list of pending barriers is protected by the
+ * kernel_context timeline, which notably we do not hold
+ * here. i915_request_add_active_barriers() may consume
+ * the barrier before we claim it, so we have to check
+ * for success.
+ */
+ engine = __barrier_to_engine(node);
+ smp_rmb(); /* serialise with add_active_barriers */
+ if (is_barrier(&node->base) &&
+ ____active_del_barrier(ref, node, engine))
+ goto match;
+ }
+
+ mutex_unlock(&ref->mutex);
+
+ return NULL;
+
+match:
+ rb_erase(p, &ref->tree); /* Hide from waits and sibling allocations */
+ if (p == &ref->cache->node)
+ ref->cache = NULL;
+ mutex_unlock(&ref->mutex);
+
+ return rb_entry(p, struct active_node, node);
+}
+
int i915_active_acquire_preallocate_barrier(struct i915_active *ref,
struct intel_engine_cs *engine)
{
struct drm_i915_private *i915 = engine->i915;
+ intel_engine_mask_t tmp, mask = engine->mask;
struct llist_node *pos, *next;
- unsigned long tmp;
int err;
- GEM_BUG_ON(!engine->mask);
- for_each_engine_masked(engine, i915, engine->mask, tmp) {
- struct intel_context *kctx = engine->kernel_context;
+ GEM_BUG_ON(!llist_empty(&ref->preallocated_barriers));
+
+ /*
+ * Preallocate a node for each physical engine supporting the target
+ * engine (remember virtual engines have more than one sibling).
+ * We can then use the preallocated nodes in
+ * i915_active_acquire_barrier()
+ */
+ for_each_engine_masked(engine, i915, mask, tmp) {
+ u64 idx = engine->kernel_context->timeline->fence_context;
struct active_node *node;
- node = kmem_cache_alloc(global.slab_cache, GFP_KERNEL);
- if (unlikely(!node)) {
- err = -ENOMEM;
- goto unwind;
+ node = reuse_idle_barrier(ref, idx);
+ if (!node) {
+ node = kmem_cache_alloc(global.slab_cache, GFP_KERNEL);
+ if (!node) {
+ err = -ENOMEM;
+ goto unwind;
+ }
+
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
+ node->base.lock =
+ &engine->kernel_context->timeline->mutex;
+#endif
+ RCU_INIT_POINTER(node->base.request, NULL);
+ node->base.retire = node_retire;
+ node->timeline = idx;
+ node->ref = ref;
}
- i915_active_request_init(&node->base,
- (void *)engine, node_retire);
- node->timeline = kctx->ring->timeline->fence_context;
- node->ref = ref;
- ref->count++;
+ if (!i915_active_request_isset(&node->base)) {
+ /*
+ * Mark this as being *our* unconnected proto-node.
+ *
+ * Since this node is not in any list, and we have
+ * decoupled it from the rbtree, we can reuse the
+ * request to indicate this is an idle-barrier node
+ * and then we can use the rb_node and list pointers
+ * for our tracking of the pending barrier.
+ */
+ RCU_INIT_POINTER(node->base.request, ERR_PTR(-EAGAIN));
+ node->base.link.prev = (void *)engine;
+ atomic_inc(&ref->count);
+ }
+ GEM_BUG_ON(barrier_to_engine(node) != engine);
+ llist_add(barrier_to_ll(node), &ref->preallocated_barriers);
intel_engine_pm_get(engine);
- llist_add((struct llist_node *)&node->base.link,
- &ref->barriers);
}
return 0;
unwind:
- llist_for_each_safe(pos, next, llist_del_all(&ref->barriers)) {
- struct active_node *node;
+ llist_for_each_safe(pos, next, take_preallocated_barriers(ref)) {
+ struct active_node *node = barrier_from_ll(pos);
- node = container_of((struct list_head *)pos,
- typeof(*node), base.link);
- engine = (void *)rcu_access_pointer(node->base.request);
+ atomic_dec(&ref->count);
+ intel_engine_pm_put(barrier_to_engine(node));
- intel_engine_pm_put(engine);
kmem_cache_free(global.slab_cache, node);
}
return err;
@@ -316,26 +649,29 @@ void i915_active_acquire_barrier(struct i915_active *ref)
{
struct llist_node *pos, *next;
- i915_active_acquire(ref);
+ GEM_BUG_ON(i915_active_is_idle(ref));
- llist_for_each_safe(pos, next, llist_del_all(&ref->barriers)) {
- struct intel_engine_cs *engine;
- struct active_node *node;
+ /*
+ * Transfer the list of preallocated barriers into the
+ * i915_active rbtree, but only as proto-nodes. They will be
+ * populated by i915_request_add_active_barriers() to point to the
+ * request that will eventually release them.
+ */
+ mutex_lock_nested(&ref->mutex, SINGLE_DEPTH_NESTING);
+ llist_for_each_safe(pos, next, take_preallocated_barriers(ref)) {
+ struct active_node *node = barrier_from_ll(pos);
+ struct intel_engine_cs *engine = barrier_to_engine(node);
struct rb_node **p, *parent;
- node = container_of((struct list_head *)pos,
- typeof(*node), base.link);
-
- engine = (void *)rcu_access_pointer(node->base.request);
- RCU_INIT_POINTER(node->base.request, ERR_PTR(-EAGAIN));
-
parent = NULL;
p = &ref->tree.rb_node;
while (*p) {
+ struct active_node *it;
+
parent = *p;
- if (rb_entry(parent,
- struct active_node,
- node)->timeline < node->timeline)
+
+ it = rb_entry(parent, struct active_node, node);
+ if (it->timeline < node->timeline)
p = &parent->rb_right;
else
p = &parent->rb_left;
@@ -343,20 +679,30 @@ void i915_active_acquire_barrier(struct i915_active *ref)
rb_link_node(&node->node, parent, p);
rb_insert_color(&node->node, &ref->tree);
- llist_add((struct llist_node *)&node->base.link,
- &engine->barrier_tasks);
+ llist_add(barrier_to_ll(node), &engine->barrier_tasks);
intel_engine_pm_put(engine);
}
- i915_active_release(ref);
+ mutex_unlock(&ref->mutex);
}
-void i915_request_add_barriers(struct i915_request *rq)
+void i915_request_add_active_barriers(struct i915_request *rq)
{
struct intel_engine_cs *engine = rq->engine;
struct llist_node *node, *next;
- llist_for_each_safe(node, next, llist_del_all(&engine->barrier_tasks))
+ GEM_BUG_ON(intel_engine_is_virtual(engine));
+ GEM_BUG_ON(rq->timeline != engine->kernel_context->timeline);
+
+ /*
+ * Attach the list of proto-fences to the in-flight request such
+ * that the parent i915_active will be released when this request
+ * is retired.
+ */
+ llist_for_each_safe(node, next, llist_del_all(&engine->barrier_tasks)) {
+ RCU_INIT_POINTER(barrier_from_ll(node)->base.request, rq);
+ smp_wmb(); /* serialise with reuse_idle_barrier */
list_add_tail((struct list_head *)node, &rq->active_list);
+ }
}
int i915_active_request_set(struct i915_active_request *active,
@@ -364,6 +710,10 @@ int i915_active_request_set(struct i915_active_request *active,
{
int err;
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
+ lockdep_assert_held(active->lock);
+#endif
+
/* Must maintain ordering wrt previous active requests */
err = i915_request_await_active_request(rq, active);
if (err)
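
[Editor's note: stepping back from the i915_active.c rework above, the struct_mutex-protected count becomes an atomic_t paired with a per-object mutex that guards only the rbtree, so references can be taken and dropped without any lock on the fast path. A minimal sketch of the retire side, mirroring active_retire()/__active_retire() above (names shortened):]

static void sketch_retire(struct i915_active *ref)
{
	/* fast path: not the last reference, no lock taken */
	if (atomic_add_unless(&ref->count, -1, 1))
		return;

	/* possibly the last: serialise against concurrent acquire */
	mutex_lock_nested(&ref->mutex, SINGLE_DEPTH_NESTING);
	__active_retire(ref);	/* dec-and-test, detach tree, unlock, free */
}
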
diff --git a/drivers/gpu/drm/i915/i915_active.h b/drivers/gpu/drm/i915/i915_active.h
index c14eebf6d074..f95058f99057 100644
--- a/drivers/gpu/drm/i915/i915_active.h
+++ b/drivers/gpu/drm/i915/i915_active.h
@@ -58,15 +58,20 @@ void i915_active_retire_noop(struct i915_active_request *active,
*/
static inline void
i915_active_request_init(struct i915_active_request *active,
+ struct mutex *lock,
struct i915_request *rq,
i915_active_retire_fn retire)
{
RCU_INIT_POINTER(active->request, rq);
INIT_LIST_HEAD(&active->link);
active->retire = retire ?: i915_active_retire_noop;
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
+ active->lock = lock;
+#endif
}
-#define INIT_ACTIVE_REQUEST(name) i915_active_request_init((name), NULL, NULL)
+#define INIT_ACTIVE_REQUEST(name, lock) \
+ i915_active_request_init((name), (lock), NULL, NULL)
/**
* i915_active_request_set - updates the tracker to watch the current request
@@ -81,6 +86,9 @@ static inline void
__i915_active_request_set(struct i915_active_request *active,
struct i915_request *request)
{
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
+ lockdep_assert_held(active->lock);
+#endif
list_move(&active->link, &request->active_list);
rcu_assign_pointer(active->request, request);
}
@@ -90,25 +98,6 @@ i915_active_request_set(struct i915_active_request *active,
struct i915_request *rq);
/**
- * i915_active_request_set_retire_fn - updates the retirement callback
- * @active - the active tracker
- * @fn - the routine called when the request is retired
- * @mutex - struct_mutex used to guard retirements
- *
- * i915_active_request_set_retire_fn() updates the function pointer that
- * is called when the final request associated with the @active tracker
- * is retired.
- */
-static inline void
-i915_active_request_set_retire_fn(struct i915_active_request *active,
- i915_active_retire_fn fn,
- struct mutex *mutex)
-{
- lockdep_assert_held(mutex);
- active->retire = fn ?: i915_active_retire_noop;
-}
-
-/**
* i915_active_request_raw - return the active request
* @active - the active tracker
*
@@ -369,12 +358,19 @@ i915_active_request_retire(struct i915_active_request *active,
* synchronisation.
*/
-void i915_active_init(struct drm_i915_private *i915,
- struct i915_active *ref,
- void (*retire)(struct i915_active *ref));
+void __i915_active_init(struct drm_i915_private *i915,
+ struct i915_active *ref,
+ int (*active)(struct i915_active *ref),
+ void (*retire)(struct i915_active *ref),
+ struct lock_class_key *key);
+#define i915_active_init(i915, ref, active, retire) do { \
+ static struct lock_class_key __key; \
+ \
+ __i915_active_init(i915, ref, active, retire, &__key); \
+} while (0)
int i915_active_ref(struct i915_active *ref,
- u64 timeline,
+ struct intel_timeline *tl,
struct i915_request *rq);
int i915_active_wait(struct i915_active *ref);
@@ -384,20 +380,17 @@ int i915_request_await_active(struct i915_request *rq,
int i915_request_await_active_request(struct i915_request *rq,
struct i915_active_request *active);
-bool i915_active_acquire(struct i915_active *ref);
-
-static inline void i915_active_cancel(struct i915_active *ref)
-{
- GEM_BUG_ON(ref->count != 1);
- ref->count = 0;
-}
-
+int i915_active_acquire(struct i915_active *ref);
void i915_active_release(struct i915_active *ref);
+void __i915_active_release_nested(struct i915_active *ref, int subclass);
+
+bool i915_active_trygrab(struct i915_active *ref);
+void i915_active_ungrab(struct i915_active *ref);
static inline bool
i915_active_is_idle(const struct i915_active *ref)
{
- return !ref->count;
+ return !atomic_read(&ref->count);
}
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
@@ -409,6 +402,6 @@ static inline void i915_active_fini(struct i915_active *ref) { }
int i915_active_acquire_preallocate_barrier(struct i915_active *ref,
struct intel_engine_cs *engine);
void i915_active_acquire_barrier(struct i915_active *ref);
-void i915_request_add_barriers(struct i915_request *rq);
+void i915_request_add_active_barriers(struct i915_request *rq);
#endif /* _I915_ACTIVE_H_ */
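
[Editor's note: one idiom from the header above is worth calling out. i915_active_init() wraps __i915_active_init() in a macro that declares a function-local static struct lock_class_key, so each call-site's ref->mutex gets its own lockdep class and unrelated i915_active instances cannot produce false lock-order reports against each other. The same pattern reduced to its core -- a sketch with hypothetical names:]

#define sketch_obj_init(obj) do {				\
	static struct lock_class_key __key;	/* one per call-site */ \
								\
	__mutex_init(&(obj)->mutex, "sketch_obj", &__key);	\
} while (0)
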
diff --git a/drivers/gpu/drm/i915/i915_active_types.h b/drivers/gpu/drm/i915/i915_active_types.h
index c025991b9233..1854e7d168c1 100644
--- a/drivers/gpu/drm/i915/i915_active_types.h
+++ b/drivers/gpu/drm/i915/i915_active_types.h
@@ -7,7 +7,9 @@
#ifndef _I915_ACTIVE_TYPES_H_
#define _I915_ACTIVE_TYPES_H_
+#include <linux/atomic.h>
#include <linux/llist.h>
+#include <linux/mutex.h>
#include <linux/rbtree.h>
#include <linux/rcupdate.h>
@@ -22,18 +24,40 @@ struct i915_active_request {
struct i915_request __rcu *request;
struct list_head link;
i915_active_retire_fn retire;
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
+ /*
+ * Incorporeal!
+ *
+ * Updates to the i915_active_request must be serialised under a lock
+ * to ensure that the timeline is ordered. Normally, this is the
+ * timeline->mutex, but another mutex may be used so long as it is
+ * done so consistently.
+ *
+ * For lockdep tracking of the above, we store the lock we intend
+ * to always use for updates of this i915_active_request during
+ * construction and assert that is held on every update.
+ */
+ struct mutex *lock;
+#endif
};
+struct active_node;
+
struct i915_active {
struct drm_i915_private *i915;
+ struct active_node *cache;
struct rb_root tree;
- struct i915_active_request last;
- unsigned int count;
+ struct mutex mutex;
+ atomic_t count;
+
+ unsigned long flags;
+#define I915_ACTIVE_GRAB_BIT 0
+ int (*active)(struct i915_active *ref);
void (*retire)(struct i915_active *ref);
- struct llist_head barriers;
+ struct llist_head preallocated_barriers;
};
#endif /* _I915_ACTIVE_TYPES_H_ */
diff --git a/drivers/gpu/drm/i915/i915_buddy.c b/drivers/gpu/drm/i915/i915_buddy.c
new file mode 100644
index 000000000000..fe1871d7c126
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_buddy.c
@@ -0,0 +1,428 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include <linux/kmemleak.h>
+#include <linux/slab.h>
+
+#include "i915_buddy.h"
+
+#include "i915_gem.h"
+#include "i915_globals.h"
+#include "i915_utils.h"
+
+static struct i915_global_block {
+ struct i915_global base;
+ struct kmem_cache *slab_blocks;
+} global;
+
+static void i915_global_buddy_shrink(void)
+{
+ kmem_cache_shrink(global.slab_blocks);
+}
+
+static void i915_global_buddy_exit(void)
+{
+ kmem_cache_destroy(global.slab_blocks);
+}
+
+static struct i915_global_block global = { {
+ .shrink = i915_global_buddy_shrink,
+ .exit = i915_global_buddy_exit,
+} };
+
+int __init i915_global_buddy_init(void)
+{
+ global.slab_blocks = KMEM_CACHE(i915_buddy_block, SLAB_HWCACHE_ALIGN);
+ if (!global.slab_blocks)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static struct i915_buddy_block *i915_block_alloc(struct i915_buddy_block *parent,
+ unsigned int order,
+ u64 offset)
+{
+ struct i915_buddy_block *block;
+
+ block = kmem_cache_zalloc(global.slab_blocks, GFP_KERNEL);
+ if (!block)
+ return NULL;
+
+ block->header = offset;
+ block->header |= order;
+ block->parent = parent;
+
+ return block;
+}
+
+static void i915_block_free(struct i915_buddy_block *block)
+{
+ kmem_cache_free(global.slab_blocks, block);
+}
+
+static void mark_allocated(struct i915_buddy_block *block)
+{
+ block->header &= ~I915_BUDDY_HEADER_STATE;
+ block->header |= I915_BUDDY_ALLOCATED;
+
+ list_del(&block->link);
+}
+
+static void mark_free(struct i915_buddy_mm *mm,
+ struct i915_buddy_block *block)
+{
+ block->header &= ~I915_BUDDY_HEADER_STATE;
+ block->header |= I915_BUDDY_FREE;
+
+ list_add(&block->link,
+ &mm->free_list[i915_buddy_block_order(block)]);
+}
+
+static void mark_split(struct i915_buddy_block *block)
+{
+ block->header &= ~I915_BUDDY_HEADER_STATE;
+ block->header |= I915_BUDDY_SPLIT;
+
+ list_del(&block->link);
+}
+
+int i915_buddy_init(struct i915_buddy_mm *mm, u64 size, u64 chunk_size)
+{
+ unsigned int i;
+ u64 offset;
+
+ if (size < chunk_size)
+ return -EINVAL;
+
+ if (chunk_size < PAGE_SIZE)
+ return -EINVAL;
+
+ if (!is_power_of_2(chunk_size))
+ return -EINVAL;
+
+ size = round_down(size, chunk_size);
+
+ mm->size = size;
+ mm->chunk_size = chunk_size;
+ mm->max_order = ilog2(size) - ilog2(chunk_size);
+
+ GEM_BUG_ON(mm->max_order > I915_BUDDY_MAX_ORDER);
+
+ mm->free_list = kmalloc_array(mm->max_order + 1,
+ sizeof(struct list_head),
+ GFP_KERNEL);
+ if (!mm->free_list)
+ return -ENOMEM;
+
+ for (i = 0; i <= mm->max_order; ++i)
+ INIT_LIST_HEAD(&mm->free_list[i]);
+
+ mm->n_roots = hweight64(size);
+
+ mm->roots = kmalloc_array(mm->n_roots,
+ sizeof(struct i915_buddy_block *),
+ GFP_KERNEL);
+ if (!mm->roots)
+ goto out_free_list;
+
+ offset = 0;
+ i = 0;
+
+ /*
+ * Split into power-of-two blocks, in case we are given a size that is
+ * not itself a power-of-two.
+ */
+ do {
+ struct i915_buddy_block *root;
+ unsigned int order;
+ u64 root_size;
+
+ root_size = rounddown_pow_of_two(size);
+ order = ilog2(root_size) - ilog2(chunk_size);
+
+ root = i915_block_alloc(NULL, order, offset);
+ if (!root)
+ goto out_free_roots;
+
+ mark_free(mm, root);
+
+ GEM_BUG_ON(i > mm->max_order);
+ GEM_BUG_ON(i915_buddy_block_size(mm, root) < chunk_size);
+
+ mm->roots[i] = root;
+
+ offset += root_size;
+ size -= root_size;
+ i++;
+ } while (size);
+
+ return 0;
+
+out_free_roots:
+ while (i--)
+ i915_block_free(mm->roots[i]);
+ kfree(mm->roots);
+out_free_list:
+ kfree(mm->free_list);
+ return -ENOMEM;
+}
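/*
 * Worked example of the root-splitting loop above, as a standalone
 * userspace demo (illustrative, not part of this patch;
 * rounddown_pow_of_two and ilog2 are reimplemented locally): a 12 MiB
 * space with 4 KiB chunks decomposes into an 8 MiB root (order 11)
 * followed by a 4 MiB root (order 10), matching
 * n_roots = hweight64(size) = 2.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t rounddown_pow_of_two_u64(uint64_t x)
{
        while (x & (x - 1))
                x &= x - 1;     /* clear the lowest set bit */
        return x;
}

static unsigned int ilog2_u64(uint64_t x)
{
        unsigned int r = 0;

        while (x >>= 1)
                r++;
        return r;
}

int main(void)
{
        uint64_t size = 12ull << 20;    /* 12 MiB: not a power of two */
        uint64_t chunk = 4096;          /* 4 KiB chunks */
        uint64_t offset = 0;

        while (size) {
                uint64_t root = rounddown_pow_of_two_u64(size);

                printf("root @ %#llx: %llu KiB (order %u)\n",
                       (unsigned long long)offset,
                       (unsigned long long)(root >> 10),
                       ilog2_u64(root) - ilog2_u64(chunk));
                offset += root;
                size -= root;
        }
        return 0;
}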
+
+void i915_buddy_fini(struct i915_buddy_mm *mm)
+{
+ int i;
+
+ for (i = 0; i < mm->n_roots; ++i) {
+ GEM_WARN_ON(!i915_buddy_block_is_free(mm->roots[i]));
+ i915_block_free(mm->roots[i]);
+ }
+
+ kfree(mm->roots);
+ kfree(mm->free_list);
+}
+
+static int split_block(struct i915_buddy_mm *mm,
+ struct i915_buddy_block *block)
+{
+ unsigned int block_order = i915_buddy_block_order(block) - 1;
+ u64 offset = i915_buddy_block_offset(block);
+
+ GEM_BUG_ON(!i915_buddy_block_is_free(block));
+ GEM_BUG_ON(!i915_buddy_block_order(block));
+
+ block->left = i915_block_alloc(block, block_order, offset);
+ if (!block->left)
+ return -ENOMEM;
+
+ block->right = i915_block_alloc(block, block_order,
+ offset + (mm->chunk_size << block_order));
+ if (!block->right) {
+ i915_block_free(block->left);
+ return -ENOMEM;
+ }
+
+ mark_free(mm, block->left);
+ mark_free(mm, block->right);
+
+ mark_split(block);
+
+ return 0;
+}
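/*
 * Worked example (illustrative, not part of this patch): with
 * chunk_size = 4 KiB, splitting an order-3 block (32 KiB) at offset
 * 0x20000 produces two order-2 children of 16 KiB each:
 *
 *   left:  offset 0x20000
 *   right: offset 0x20000 + (4096 << 2) = 0x24000
 *
 * Repeated splitting keeps every child naturally aligned to its own
 * size, which is what allows get_buddy() below to locate the sibling
 * through the parent pointer alone.
 */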
+
+static struct i915_buddy_block *
+get_buddy(struct i915_buddy_block *block)
+{
+ struct i915_buddy_block *parent;
+
+ parent = block->parent;
+ if (!parent)
+ return NULL;
+
+ if (parent->left == block)
+ return parent->right;
+
+ return parent->left;
+}
+
+static void __i915_buddy_free(struct i915_buddy_mm *mm,
+ struct i915_buddy_block *block)
+{
+ struct i915_buddy_block *parent;
+
+ while ((parent = block->parent)) {
+ struct i915_buddy_block *buddy;
+
+ buddy = get_buddy(block);
+
+ if (!i915_buddy_block_is_free(buddy))
+ break;
+
+ list_del(&buddy->link);
+
+ i915_block_free(block);
+ i915_block_free(buddy);
+
+ block = parent;
+ }
+
+ mark_free(mm, block);
+}
+
+void i915_buddy_free(struct i915_buddy_mm *mm,
+ struct i915_buddy_block *block)
+{
+ GEM_BUG_ON(!i915_buddy_block_is_allocated(block));
+ __i915_buddy_free(mm, block);
+}
+
+void i915_buddy_free_list(struct i915_buddy_mm *mm, struct list_head *objects)
+{
+ struct i915_buddy_block *block, *on;
+
+ list_for_each_entry_safe(block, on, objects, link)
+ i915_buddy_free(mm, block);
+ INIT_LIST_HEAD(objects);
+}
+
+/*
+ * Allocate power-of-two block. The order value here translates to:
+ *
+ * 0 = 2^0 * mm->chunk_size
+ * 1 = 2^1 * mm->chunk_size
+ * 2 = 2^2 * mm->chunk_size
+ * ...
+ */
+struct i915_buddy_block *
+i915_buddy_alloc(struct i915_buddy_mm *mm, unsigned int order)
+{
+ struct i915_buddy_block *block = NULL;
+ unsigned int i;
+ int err;
+
+ for (i = order; i <= mm->max_order; ++i) {
+ block = list_first_entry_or_null(&mm->free_list[i],
+ struct i915_buddy_block,
+ link);
+ if (block)
+ break;
+ }
+
+ if (!block)
+ return ERR_PTR(-ENOSPC);
+
+ GEM_BUG_ON(!i915_buddy_block_is_free(block));
+
+ while (i != order) {
+ err = split_block(mm, block);
+ if (unlikely(err))
+ goto out_free;
+
+ /* Go low */
+ block = block->left;
+ i--;
+ }
+
+ mark_allocated(block);
+ kmemleak_update_trace(block);
+ return block;
+
+out_free:
+ __i915_buddy_free(mm, block);
+ return ERR_PTR(err);
+}
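/*
 * Hedged caller sketch (illustrative, not part of this patch): carve a
 * byte size into power-of-two blocks, biggest first, which is the usual
 * way to drive a buddy allocator.  round_up(), ilog2() and min_t() are
 * the standard kernel helpers; error unwinding of @blocks is left to
 * the caller.
 */
static int example_alloc_size(struct i915_buddy_mm *mm, u64 size,
                              struct list_head *blocks)
{
        size = round_up(size, mm->chunk_size);
        if (!size)
                return -EINVAL;

        do {
                struct i915_buddy_block *block;
                unsigned int order;

                /* Largest order that still fits the remaining size. */
                order = min_t(unsigned int,
                              ilog2(size) - ilog2(mm->chunk_size),
                              mm->max_order);

                block = i915_buddy_alloc(mm, order);
                if (IS_ERR(block))
                        return PTR_ERR(block);

                list_add_tail(&block->link, blocks);
                size -= i915_buddy_block_size(mm, block);
        } while (size);

        return 0;
}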
+
+static inline bool overlaps(u64 s1, u64 e1, u64 s2, u64 e2)
+{
+ return s1 <= e2 && e1 >= s2;
+}
+
+static inline bool contains(u64 s1, u64 e1, u64 s2, u64 e2)
+{
+ return s1 <= s2 && e1 >= e2;
+}
+
+/*
+ * Allocate range. Note that it's safe to chain together multiple alloc_ranges
+ * with the same blocks list.
+ *
+ * Intended for pre-allocating portions of the address space, for example to
+ * reserve a block for the initial framebuffer or similar. The expectation
+ * here is that i915_buddy_alloc() remains the main vehicle for allocations;
+ * if that's not the case, the drm_mm range allocator is probably a much
+ * better fit, and you should probably use that instead.
+ */
+int i915_buddy_alloc_range(struct i915_buddy_mm *mm,
+ struct list_head *blocks,
+ u64 start, u64 size)
+{
+ struct i915_buddy_block *block;
+ struct i915_buddy_block *buddy;
+ LIST_HEAD(allocated);
+ LIST_HEAD(dfs);
+ u64 end;
+ int err;
+ int i;
+
+ if (size < mm->chunk_size)
+ return -EINVAL;
+
+ if (!IS_ALIGNED(size | start, mm->chunk_size))
+ return -EINVAL;
+
+ if (range_overflows(start, size, mm->size))
+ return -EINVAL;
+
+ for (i = 0; i < mm->n_roots; ++i)
+ list_add_tail(&mm->roots[i]->tmp_link, &dfs);
+
+ end = start + size - 1;
+
+ do {
+ u64 block_start;
+ u64 block_end;
+
+ block = list_first_entry_or_null(&dfs,
+ struct i915_buddy_block,
+ tmp_link);
+ if (!block)
+ break;
+
+ list_del(&block->tmp_link);
+
+ block_start = i915_buddy_block_offset(block);
+ block_end = block_start + i915_buddy_block_size(mm, block) - 1;
+
+ if (!overlaps(start, end, block_start, block_end))
+ continue;
+
+ if (i915_buddy_block_is_allocated(block)) {
+ err = -ENOSPC;
+ goto err_free;
+ }
+
+ if (contains(start, end, block_start, block_end)) {
+ if (!i915_buddy_block_is_free(block)) {
+ err = -ENOSPC;
+ goto err_free;
+ }
+
+ mark_allocated(block);
+ list_add_tail(&block->link, &allocated);
+ continue;
+ }
+
+ if (!i915_buddy_block_is_split(block)) {
+ err = split_block(mm, block);
+ if (unlikely(err))
+ goto err_undo;
+ }
+
+ list_add(&block->right->tmp_link, &dfs);
+ list_add(&block->left->tmp_link, &dfs);
+ } while (1);
+
+ list_splice_tail(&allocated, blocks);
+ return 0;
+
+err_undo:
+ /*
+ * We really don't want to leave around a bunch of split blocks, since
+ * bigger is better, so make sure we merge everything back before we
+ * free the allocated blocks.
+ */
+ buddy = get_buddy(block);
+ if (buddy &&
+ (i915_buddy_block_is_free(block) &&
+ i915_buddy_block_is_free(buddy)))
+ __i915_buddy_free(mm, block);
+
+err_free:
+ i915_buddy_free_list(mm, &allocated);
+ return err;
+}
+
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+#include "selftests/i915_buddy.c"
+#endif
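i915_buddy_alloc_range() pairs naturally with boot-time reservations. A
hedged usage sketch (illustrative only; example_reserve_fb, fb_base and
fb_size are invented for this note, not part of the patch): reserve a
firmware-initialised framebuffer before general allocations begin, then
release it later with i915_buddy_free_list() once it is no longer scanned
out.

static int example_reserve_fb(struct i915_buddy_mm *mm,
                              struct list_head *reserved)
{
        const u64 fb_base = 0;          /* start of the scanout buffer */
        const u64 fb_size = 8ull << 20; /* 8 MiB, chunk aligned */
        int err;

        err = i915_buddy_alloc_range(mm, reserved, fb_base, fb_size);
        if (err)        /* -ENOSPC if any chunk in the range is taken */
                return err;

        /* Blocks are now on @reserved, owned by the caller. */
        return 0;
}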
diff --git a/drivers/gpu/drm/i915/i915_buddy.h b/drivers/gpu/drm/i915/i915_buddy.h
new file mode 100644
index 000000000000..ed41f3507cdc
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_buddy.h
@@ -0,0 +1,128 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __I915_BUDDY_H__
+#define __I915_BUDDY_H__
+
+#include <linux/bitops.h>
+#include <linux/list.h>
+
+struct i915_buddy_block {
+#define I915_BUDDY_HEADER_OFFSET GENMASK_ULL(63, 12)
+#define I915_BUDDY_HEADER_STATE GENMASK_ULL(11, 10)
+#define I915_BUDDY_ALLOCATED (1 << 10)
+#define I915_BUDDY_FREE (2 << 10)
+#define I915_BUDDY_SPLIT (3 << 10)
+#define I915_BUDDY_HEADER_ORDER GENMASK_ULL(9, 0)
+ u64 header;
+
+ struct i915_buddy_block *left;
+ struct i915_buddy_block *right;
+ struct i915_buddy_block *parent;
+
+ void *private; /* owned by creator */
+
+ /*
+ * While the block is allocated by the user through i915_buddy_alloc*,
+ * the user has ownership of the link, for example to maintain within
+ * a list, if so desired. As soon as the block is freed with
+ * i915_buddy_free* ownership is given back to the mm.
+ */
+ struct list_head link;
+ struct list_head tmp_link;
+};
+
+#define I915_BUDDY_MAX_ORDER I915_BUDDY_HEADER_ORDER
+
+/*
+ * Binary Buddy System.
+ *
+ * Locking should be handled by the user; a simple mutex around
+ * i915_buddy_alloc* and i915_buddy_free* should suffice.
+ */
+struct i915_buddy_mm {
+ /* Maintain a free list for each order. */
+ struct list_head *free_list;
+
+ /*
+ * Maintain explicit binary tree(s) to track the allocation of the
+ * address space. This gives us a simple way of finding a buddy block
+ * and performing the potentially recursive merge step when freeing a
+ * block. Nodes are either allocated or free; free nodes also sit on
+ * the respective per-order free list.
+ */
+ struct i915_buddy_block **roots;
+
+ /*
+ * Anything from here is public, and remains static for the lifetime of
+ * the mm. Everything above is considered do-not-touch.
+ */
+ unsigned int n_roots;
+ unsigned int max_order;
+
+ /* Must be at least PAGE_SIZE */
+ u64 chunk_size;
+ u64 size;
+};
+
+static inline u64
+i915_buddy_block_offset(struct i915_buddy_block *block)
+{
+ return block->header & I915_BUDDY_HEADER_OFFSET;
+}
+
+static inline unsigned int
+i915_buddy_block_order(struct i915_buddy_block *block)
+{
+ return block->header & I915_BUDDY_HEADER_ORDER;
+}
+
+static inline unsigned int
+i915_buddy_block_state(struct i915_buddy_block *block)
+{
+ return block->header & I915_BUDDY_HEADER_STATE;
+}
+
+static inline bool
+i915_buddy_block_is_allocated(struct i915_buddy_block *block)
+{
+ return i915_buddy_block_state(block) == I915_BUDDY_ALLOCATED;
+}
+
+static inline bool
+i915_buddy_block_is_free(struct i915_buddy_block *block)
+{
+ return i915_buddy_block_state(block) == I915_BUDDY_FREE;
+}
+
+static inline bool
+i915_buddy_block_is_split(struct i915_buddy_block *block)
+{
+ return i915_buddy_block_state(block) == I915_BUDDY_SPLIT;
+}
+
+static inline u64
+i915_buddy_block_size(struct i915_buddy_mm *mm,
+ struct i915_buddy_block *block)
+{
+ return mm->chunk_size << i915_buddy_block_order(block);
+}
+
+int i915_buddy_init(struct i915_buddy_mm *mm, u64 size, u64 chunk_size);
+
+void i915_buddy_fini(struct i915_buddy_mm *mm);
+
+struct i915_buddy_block *
+i915_buddy_alloc(struct i915_buddy_mm *mm, unsigned int order);
+
+int i915_buddy_alloc_range(struct i915_buddy_mm *mm,
+ struct list_head *blocks,
+ u64 start, u64 size);
+
+void i915_buddy_free(struct i915_buddy_mm *mm, struct i915_buddy_block *block);
+
+void i915_buddy_free_list(struct i915_buddy_mm *mm, struct list_head *objects);
+
+#endif
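The single u64 header above packs three fields: bits 63:12 hold the block
offset (always chunk-aligned, and with chunk_size at least PAGE_SIZE the low
12 bits of any offset are zero on a 4 KiB-page system), bits 11:10 the
allocation state, and bits 9:0 the order; the block size is then
chunk_size << order. A standalone decode demo, with the masks restated
locally (illustrative, not part of the patch):

#include <stdint.h>
#include <stdio.h>

#define HDR_OFFSET      (~0ull << 12)   /* bits 63:12 */
#define HDR_STATE       (0x3ull << 10)  /* bits 11:10 */
#define HDR_ORDER       0x3ffull        /* bits 9:0 */

int main(void)
{
        uint64_t chunk = 4096;
        /* Pack a FREE (state 2) block of order 3 at offset 0x20000. */
        uint64_t header = 0x20000ull | (2ull << 10) | 3;

        printf("offset = %#llx\n", (unsigned long long)(header & HDR_OFFSET));
        printf("state  = %llu (2 == FREE)\n",
               (unsigned long long)((header & HDR_STATE) >> 10));
        printf("order  = %llu\n", (unsigned long long)(header & HDR_ORDER));
        printf("size   = %llu KiB\n",
               (unsigned long long)((chunk << (header & HDR_ORDER)) >> 10));
        return 0;
}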
diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c
index a28bcd2d7c09..24555102e198 100644
--- a/drivers/gpu/drm/i915/i915_cmd_parser.c
+++ b/drivers/gpu/drm/i915/i915_cmd_parser.c
@@ -28,6 +28,7 @@
#include "gt/intel_engine.h"
#include "i915_drv.h"
+#include "i915_memcpy.h"
/**
* DOC: batch buffer command parser
@@ -1352,11 +1353,10 @@ int intel_engine_cmd_parser(struct intel_engine_cs *engine,
int i915_cmd_parser_get_version(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *engine;
- enum intel_engine_id id;
bool active = false;
/* If the command parser is not enabled, report 0 - unsupported */
- for_each_engine(engine, dev_priv, id) {
+ for_each_uabi_engine(engine, dev_priv) {
if (intel_engine_needs_cmd_parser(engine)) {
active = true;
break;
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 62cf34db9280..b0f51591f2e4 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -32,6 +32,7 @@
#include <drm/drm_debugfs.h>
#include <drm/drm_fourcc.h>
+#include "display/intel_display_types.h"
#include "display/intel_dp.h"
#include "display/intel_fbc.h"
#include "display/intel_hdcp.h"
@@ -39,13 +40,14 @@
#include "display/intel_psr.h"
#include "gem/i915_gem_context.h"
+#include "gt/intel_gt_pm.h"
#include "gt/intel_reset.h"
+#include "gt/uc/intel_guc_submission.h"
#include "i915_debugfs.h"
#include "i915_irq.h"
+#include "i915_trace.h"
#include "intel_csr.h"
-#include "intel_drv.h"
-#include "intel_guc_submission.h"
#include "intel_pm.h"
#include "intel_sideband.h"
@@ -75,11 +77,6 @@ static int i915_capabilities(struct seq_file *m, void *data)
return 0;
}
-static char get_active_flag(struct drm_i915_gem_object *obj)
-{
- return i915_gem_object_is_active(obj) ? '*' : ' ';
-}
-
static char get_pin_flag(struct drm_i915_gem_object *obj)
{
return obj->pin_global ? 'p' : ' ';
@@ -97,7 +94,7 @@ static char get_tiling_flag(struct drm_i915_gem_object *obj)
static char get_global_flag(struct drm_i915_gem_object *obj)
{
- return obj->userfault_count ? 'g' : ' ';
+ return READ_ONCE(obj->userfault_count) ? 'g' : ' ';
}
static char get_pin_mapped_flag(struct drm_i915_gem_object *obj)
@@ -141,12 +138,10 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
struct intel_engine_cs *engine;
struct i915_vma *vma;
- unsigned int frontbuffer_bits;
int pin_count = 0;
- seq_printf(m, "%pK: %c%c%c%c%c %8zdKiB %02x %02x %s%s%s",
+ seq_printf(m, "%pK: %c%c%c%c %8zdKiB %02x %02x %s%s%s",
&obj->base,
- get_active_flag(obj),
get_pin_flag(obj),
get_tiling_flag(obj),
get_global_flag(obj),
@@ -216,9 +211,7 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
}
}
if (vma->fence)
- seq_printf(m, " , fence: %d%s",
- vma->fence->id,
- i915_active_request_isset(&vma->last_fence) ? "*" : "");
+ seq_printf(m, " , fence: %d", vma->fence->id);
seq_puts(m, ")");
spin_lock(&obj->vma.lock);
@@ -234,17 +227,12 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
engine = i915_gem_object_last_write_engine(obj);
if (engine)
seq_printf(m, " (%s)", engine->name);
-
- frontbuffer_bits = atomic_read(&obj->frontbuffer_bits);
- if (frontbuffer_bits)
- seq_printf(m, " (frontbuffer: 0x%03x)", frontbuffer_bits);
}
struct file_stats {
struct i915_address_space *vm;
unsigned long count;
u64 total, unbound;
- u64 global, shared;
u64 active, inactive;
u64 closed;
};
@@ -255,73 +243,68 @@ static int per_file_stats(int id, void *ptr, void *data)
struct file_stats *stats = data;
struct i915_vma *vma;
- lockdep_assert_held(&obj->base.dev->struct_mutex);
-
stats->count++;
stats->total += obj->base.size;
if (!atomic_read(&obj->bind_count))
stats->unbound += obj->base.size;
- if (obj->base.name || obj->base.dma_buf)
- stats->shared += obj->base.size;
- list_for_each_entry(vma, &obj->vma.list, obj_link) {
- if (!drm_mm_node_allocated(&vma->node))
- continue;
-
- if (i915_vma_is_ggtt(vma)) {
- stats->global += vma->node.size;
- } else {
- if (vma->vm != stats->vm)
+ spin_lock(&obj->vma.lock);
+ if (!stats->vm) {
+ for_each_ggtt_vma(vma, obj) {
+ if (!drm_mm_node_allocated(&vma->node))
continue;
- }
- if (i915_vma_is_active(vma))
- stats->active += vma->node.size;
- else
- stats->inactive += vma->node.size;
+ if (i915_vma_is_active(vma))
+ stats->active += vma->node.size;
+ else
+ stats->inactive += vma->node.size;
- if (i915_vma_is_closed(vma))
- stats->closed += vma->node.size;
+ if (i915_vma_is_closed(vma))
+ stats->closed += vma->node.size;
+ }
+ } else {
+ struct rb_node *p = obj->vma.tree.rb_node;
+
+ while (p) {
+ long cmp;
+
+ vma = rb_entry(p, typeof(*vma), obj_node);
+ cmp = i915_vma_compare(vma, stats->vm, NULL);
+ if (cmp == 0) {
+ if (drm_mm_node_allocated(&vma->node)) {
+ if (i915_vma_is_active(vma))
+ stats->active += vma->node.size;
+ else
+ stats->inactive += vma->node.size;
+
+ if (i915_vma_is_closed(vma))
+ stats->closed += vma->node.size;
+ }
+ break;
+ }
+ if (cmp < 0)
+ p = p->rb_right;
+ else
+ p = p->rb_left;
+ }
}
+ spin_unlock(&obj->vma.lock);
return 0;
}
#define print_file_stats(m, name, stats) do { \
if (stats.count) \
- seq_printf(m, "%s: %lu objects, %llu bytes (%llu active, %llu inactive, %llu global, %llu shared, %llu unbound, %llu closed)\n", \
+ seq_printf(m, "%s: %lu objects, %llu bytes (%llu active, %llu inactive, %llu unbound, %llu closed)\n", \
name, \
stats.count, \
stats.total, \
stats.active, \
stats.inactive, \
- stats.global, \
- stats.shared, \
stats.unbound, \
stats.closed); \
} while (0)
-static void print_batch_pool_stats(struct seq_file *m,
- struct drm_i915_private *dev_priv)
-{
- struct drm_i915_gem_object *obj;
- struct intel_engine_cs *engine;
- struct file_stats stats = {};
- enum intel_engine_id id;
- int j;
-
- for_each_engine(engine, dev_priv, id) {
- for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) {
- list_for_each_entry(obj,
- &engine->batch_pool.cache_list[j],
- batch_pool_link)
- per_file_stats(0, obj, &stats);
- }
- }
-
- print_file_stats(m, "[k]batch pool", stats);
-}
-
static void print_context_stats(struct seq_file *m,
struct drm_i915_private *i915)
{
@@ -334,10 +317,14 @@ static void print_context_stats(struct seq_file *m,
for_each_gem_engine(ce,
i915_gem_context_lock_engines(ctx), it) {
- if (ce->state)
- per_file_stats(0, ce->state->obj, &kstats);
- if (ce->ring)
+ intel_context_lock_pinned(ce);
+ if (intel_context_is_pinned(ce)) {
+ if (ce->state)
+ per_file_stats(0,
+ ce->state->obj, &kstats);
per_file_stats(0, ce->ring->vma->obj, &kstats);
+ }
+ intel_context_unlock_pinned(ce);
}
i915_gem_context_unlock_engines(ctx);
@@ -369,8 +356,9 @@ static int i915_gem_object_info(struct seq_file *m, void *data)
struct drm_i915_private *i915 = node_to_i915(m->private);
int ret;
- seq_printf(m, "%u shrinkable objects, %llu bytes\n",
+ seq_printf(m, "%u shrinkable [%u free] objects, %llu bytes\n",
i915->mm.shrink_count,
+ atomic_read(&i915->mm.free_count),
i915->mm.shrink_memory);
seq_putc(m, '\n');
@@ -379,58 +367,12 @@ static int i915_gem_object_info(struct seq_file *m, void *data)
if (ret)
return ret;
- print_batch_pool_stats(m, i915);
print_context_stats(m, i915);
mutex_unlock(&i915->drm.struct_mutex);
return 0;
}
-static int i915_gem_batch_pool_info(struct seq_file *m, void *data)
-{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
- struct drm_device *dev = &dev_priv->drm;
- struct drm_i915_gem_object *obj;
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
- int total = 0;
- int ret, j;
-
- ret = mutex_lock_interruptible(&dev->struct_mutex);
- if (ret)
- return ret;
-
- for_each_engine(engine, dev_priv, id) {
- for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) {
- int count;
-
- count = 0;
- list_for_each_entry(obj,
- &engine->batch_pool.cache_list[j],
- batch_pool_link)
- count++;
- seq_printf(m, "%s cache[%d]: %d objects\n",
- engine->name, j, count);
-
- list_for_each_entry(obj,
- &engine->batch_pool.cache_list[j],
- batch_pool_link) {
- seq_puts(m, " ");
- describe_obj(m, obj);
- seq_putc(m, '\n');
- }
-
- total += count;
- }
- }
-
- seq_printf(m, "total: %d\n", total);
-
- mutex_unlock(&dev->struct_mutex);
-
- return 0;
-}
-
static void gen8_display_interrupt_info(struct seq_file *m)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
@@ -487,7 +429,6 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
struct intel_engine_cs *engine;
- enum intel_engine_id id;
intel_wakeref_t wakeref;
int i, pipe;
@@ -690,7 +631,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
I915_READ(GEN11_GUNIT_CSME_INTR_MASK));
} else if (INTEL_GEN(dev_priv) >= 6) {
- for_each_engine(engine, dev_priv, id) {
+ for_each_uabi_engine(engine, dev_priv) {
seq_printf(m,
"Graphics Interrupt mask (%s): %08x\n",
engine->name, ENGINE_READ(engine, RING_IMR));
@@ -711,10 +652,11 @@ static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
rcu_read_lock();
for (i = 0; i < i915->ggtt.num_fences; i++) {
- struct i915_vma *vma = i915->ggtt.fence_regs[i].vma;
+ struct i915_fence_reg *reg = &i915->ggtt.fence_regs[i];
+ struct i915_vma *vma = reg->vma;
seq_printf(m, "Fence %d, pin count = %d, object = ",
- i, i915->ggtt.fence_regs[i].pin_count);
+ i, atomic_read(&reg->pin_count));
if (!vma)
seq_puts(m, "unused");
else
@@ -1080,17 +1022,16 @@ static void i915_instdone_info(struct drm_i915_private *dev_priv,
static int i915_hangcheck_info(struct seq_file *m, void *unused)
{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ struct drm_i915_private *i915 = node_to_i915(m->private);
+ struct intel_gt *gt = &i915->gt;
struct intel_engine_cs *engine;
- u64 acthd[I915_NUM_ENGINES];
- struct intel_instdone instdone;
intel_wakeref_t wakeref;
enum intel_engine_id id;
- seq_printf(m, "Reset flags: %lx\n", dev_priv->gpu_error.flags);
- if (test_bit(I915_WEDGED, &dev_priv->gpu_error.flags))
+ seq_printf(m, "Reset flags: %lx\n", gt->reset.flags);
+ if (test_bit(I915_WEDGED, &gt->reset.flags))
seq_puts(m, "\tWedged\n");
- if (test_bit(I915_RESET_BACKOFF, &dev_priv->gpu_error.flags))
+ if (test_bit(I915_RESET_BACKOFF, &gt->reset.flags))
seq_puts(m, "\tDevice (global) reset in progress\n");
if (!i915_modparams.enable_hangcheck) {
@@ -1098,42 +1039,37 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
return 0;
}
- with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
- for_each_engine(engine, dev_priv, id)
- acthd[id] = intel_engine_get_active_head(engine);
-
- intel_engine_get_instdone(dev_priv->engine[RCS0], &instdone);
- }
-
- if (timer_pending(&dev_priv->gpu_error.hangcheck_work.timer))
+ if (timer_pending(&gt->hangcheck.work.timer))
seq_printf(m, "Hangcheck active, timer fires in %dms\n",
- jiffies_to_msecs(dev_priv->gpu_error.hangcheck_work.timer.expires -
+ jiffies_to_msecs(gt->hangcheck.work.timer.expires -
jiffies));
- else if (delayed_work_pending(&dev_priv->gpu_error.hangcheck_work))
+ else if (delayed_work_pending(&gt->hangcheck.work))
seq_puts(m, "Hangcheck active, work pending\n");
else
seq_puts(m, "Hangcheck inactive\n");
- seq_printf(m, "GT active? %s\n", yesno(dev_priv->gt.awake));
+ seq_printf(m, "GT active? %s\n", yesno(gt->awake));
- for_each_engine(engine, dev_priv, id) {
- seq_printf(m, "%s: %d ms ago\n",
- engine->name,
- jiffies_to_msecs(jiffies -
- engine->hangcheck.action_timestamp));
+ with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
+ for_each_engine(engine, i915, id) {
+ struct intel_instdone instdone;
- seq_printf(m, "\tACTHD = 0x%08llx [current 0x%08llx]\n",
- (long long)engine->hangcheck.acthd,
- (long long)acthd[id]);
+ seq_printf(m, "%s: %d ms ago\n",
+ engine->name,
+ jiffies_to_msecs(jiffies -
+ engine->hangcheck.action_timestamp));
- if (engine->id == RCS0) {
- seq_puts(m, "\tinstdone read =\n");
+ seq_printf(m, "\tACTHD = 0x%08llx [current 0x%08llx]\n",
+ (long long)engine->hangcheck.acthd,
+ intel_engine_get_active_head(engine));
- i915_instdone_info(dev_priv, m, &instdone);
+ intel_engine_get_instdone(engine, &instdone);
- seq_puts(m, "\tinstdone accu =\n");
+ seq_puts(m, "\tinstdone read =\n");
+ i915_instdone_info(i915, m, &instdone);
- i915_instdone_info(dev_priv, m,
+ seq_puts(m, "\tinstdone accu =\n");
+ i915_instdone_info(i915, m,
&engine->hangcheck.instdone);
}
}
@@ -1141,23 +1077,6 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
return 0;
}
-static int i915_reset_info(struct seq_file *m, void *unused)
-{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
- struct i915_gpu_error *error = &dev_priv->gpu_error;
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
-
- seq_printf(m, "full gpu reset = %u\n", i915_reset_count(error));
-
- for_each_engine(engine, dev_priv, id) {
- seq_printf(m, "%s = %u\n", engine->name,
- i915_reset_engine_count(error, engine));
- }
-
- return 0;
-}
-
static int ironlake_drpc_info(struct seq_file *m)
{
struct drm_i915_private *i915 = node_to_i915(m->private);
@@ -1224,7 +1143,7 @@ static int i915_forcewake_domains(struct seq_file *m, void *data)
unsigned int tmp;
seq_printf(m, "user.bypass_count = %u\n",
- uncore->user_forcewake.count);
+ uncore->user_forcewake_count);
for_each_fw_domain(fw_domain, uncore, tmp)
seq_printf(m, "%s.wake_count = %u\n",
@@ -1517,30 +1436,6 @@ static int i915_sr_status(struct seq_file *m, void *unused)
return 0;
}
-static int i915_emon_status(struct seq_file *m, void *unused)
-{
- struct drm_i915_private *i915 = node_to_i915(m->private);
- intel_wakeref_t wakeref;
-
- if (!IS_GEN(i915, 5))
- return -ENODEV;
-
- with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
- unsigned long temp, chipset, gfx;
-
- temp = i915_mch_val(i915);
- chipset = i915_chipset_val(i915);
- gfx = i915_gfx_val(i915);
-
- seq_printf(m, "GMCH temp: %ld\n", temp);
- seq_printf(m, "Chipset power: %ld\n", chipset);
- seq_printf(m, "GFX power: %ld\n", gfx);
- seq_printf(m, "Total power: %ld\n", chipset + gfx);
- }
-
- return 0;
-}
-
static int i915_ring_freq_table(struct seq_file *m, void *unused)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
@@ -1706,12 +1601,15 @@ static int i915_context_status(struct seq_file *m, void *unused)
for_each_gem_engine(ce,
i915_gem_context_lock_engines(ctx), it) {
- seq_printf(m, "%s: ", ce->engine->name);
- if (ce->state)
- describe_obj(m, ce->state->obj);
- if (ce->ring)
+ intel_context_lock_pinned(ce);
+ if (intel_context_is_pinned(ce)) {
+ seq_printf(m, "%s: ", ce->engine->name);
+ if (ce->state)
+ describe_obj(m, ce->state->obj);
describe_ctx_ring(m, ce->ring);
- seq_putc(m, '\n');
+ seq_putc(m, '\n');
+ }
+ intel_context_unlock_pinned(ce);
}
i915_gem_context_unlock_engines(ctx);
@@ -1894,11 +1792,11 @@ static int i915_huc_load_status_info(struct seq_file *m, void *data)
intel_wakeref_t wakeref;
struct drm_printer p;
- if (!HAS_HUC(dev_priv))
+ if (!HAS_GT_UC(dev_priv))
return -ENODEV;
p = drm_seq_file_printer(m);
- intel_uc_fw_dump(&dev_priv->huc.fw, &p);
+ intel_uc_fw_dump(&dev_priv->gt.uc.huc.fw, &p);
with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref)
seq_printf(m, "\nHuC status 0x%08x:\n", I915_READ(HUC_STATUS2));
@@ -1912,11 +1810,11 @@ static int i915_guc_load_status_info(struct seq_file *m, void *data)
intel_wakeref_t wakeref;
struct drm_printer p;
- if (!HAS_GUC(dev_priv))
+ if (!HAS_GT_UC(dev_priv))
return -ENODEV;
p = drm_seq_file_printer(m);
- intel_uc_fw_dump(&dev_priv->guc.fw, &p);
+ intel_uc_fw_dump(&dev_priv->gt.uc.guc.fw, &p);
with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
u32 tmp = I915_READ(GUC_STATUS);
@@ -1959,7 +1857,7 @@ stringify_guc_log_type(enum guc_log_buffer_type type)
static void i915_guc_log_info(struct seq_file *m,
struct drm_i915_private *dev_priv)
{
- struct intel_guc_log *log = &dev_priv->guc.log;
+ struct intel_guc_log *log = &dev_priv->gt.uc.guc.log;
enum guc_log_buffer_type type;
if (!intel_guc_log_relay_enabled(log)) {
@@ -1980,32 +1878,11 @@ static void i915_guc_log_info(struct seq_file *m,
}
}
-static void i915_guc_client_info(struct seq_file *m,
- struct drm_i915_private *dev_priv,
- struct intel_guc_client *client)
-{
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
- u64 tot = 0;
-
- seq_printf(m, "\tPriority %d, GuC stage index: %u, PD offset 0x%x\n",
- client->priority, client->stage_id, client->proc_desc_offset);
- seq_printf(m, "\tDoorbell id %d, offset: 0x%lx\n",
- client->doorbell_id, client->doorbell_offset);
-
- for_each_engine(engine, dev_priv, id) {
- u64 submissions = client->submissions[id];
- tot += submissions;
- seq_printf(m, "\tSubmissions: %llu %s\n",
- submissions, engine->name);
- }
- seq_printf(m, "\tTotal: %llu\n", tot);
-}
-
static int i915_guc_info(struct seq_file *m, void *data)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
- const struct intel_guc *guc = &dev_priv->guc;
+ const struct intel_guc *guc = &dev_priv->gt.uc.guc;
+ struct intel_guc_client *client = guc->execbuf_client;
if (!USES_GUC(dev_priv))
return -ENODEV;
@@ -2021,14 +1898,13 @@ static int i915_guc_info(struct seq_file *m, void *data)
seq_printf(m, "\t%*pb\n", GUC_NUM_DOORBELLS, guc->doorbell_bitmap);
seq_printf(m, "Doorbell next cacheline: 0x%x\n", guc->db_cacheline);
- seq_printf(m, "\nGuC execbuf client @ %p:\n", guc->execbuf_client);
- i915_guc_client_info(m, dev_priv, guc->execbuf_client);
- if (guc->preempt_client) {
- seq_printf(m, "\nGuC preempt client @ %p:\n",
- guc->preempt_client);
- i915_guc_client_info(m, dev_priv, guc->preempt_client);
- }
-
+ seq_printf(m, "\nGuC execbuf client @ %p:\n", client);
+ seq_printf(m, "\tPriority %d, GuC stage index: %u, PD offset 0x%x\n",
+ client->priority,
+ client->stage_id,
+ client->proc_desc_offset);
+ seq_printf(m, "\tDoorbell id %d, offset: 0x%lx\n",
+ client->doorbell_id, client->doorbell_offset);
/* Add more as required ... */
return 0;
@@ -2037,10 +1913,8 @@ static int i915_guc_info(struct seq_file *m, void *data)
static int i915_guc_stage_pool(struct seq_file *m, void *data)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
- const struct intel_guc *guc = &dev_priv->guc;
+ const struct intel_guc *guc = &dev_priv->gt.uc.guc;
struct guc_stage_desc *desc = guc->stage_desc_pool_vaddr;
- struct intel_guc_client *client = guc->execbuf_client;
- intel_engine_mask_t tmp;
int index;
if (!USES_GUC_SUBMISSION(dev_priv))
@@ -2069,7 +1943,7 @@ static int i915_guc_stage_pool(struct seq_file *m, void *data)
desc->wq_addr, desc->wq_size);
seq_putc(m, '\n');
- for_each_engine_masked(engine, dev_priv, client->engines, tmp) {
+ for_each_uabi_engine(engine, dev_priv) {
u32 guc_engine_id = engine->guc_id;
struct guc_execlist_context *lrc =
&desc->lrc[guc_engine_id];
@@ -2097,13 +1971,13 @@ static int i915_guc_log_dump(struct seq_file *m, void *data)
u32 *log;
int i = 0;
- if (!HAS_GUC(dev_priv))
+ if (!HAS_GT_UC(dev_priv))
return -ENODEV;
if (dump_load_err)
- obj = dev_priv->guc.load_err_log;
- else if (dev_priv->guc.log.vma)
- obj = dev_priv->guc.log.vma->obj;
+ obj = dev_priv->gt.uc.load_err_log;
+ else if (dev_priv->gt.uc.guc.log.vma)
+ obj = dev_priv->gt.uc.guc.log.vma->obj;
if (!obj)
return 0;
@@ -2134,7 +2008,7 @@ static int i915_guc_log_level_get(void *data, u64 *val)
if (!USES_GUC(dev_priv))
return -ENODEV;
- *val = intel_guc_log_get_level(&dev_priv->guc.log);
+ *val = intel_guc_log_get_level(&dev_priv->gt.uc.guc.log);
return 0;
}
@@ -2146,7 +2020,7 @@ static int i915_guc_log_level_set(void *data, u64 val)
if (!USES_GUC(dev_priv))
return -ENODEV;
- return intel_guc_log_set_level(&dev_priv->guc.log, val);
+ return intel_guc_log_set_level(&dev_priv->gt.uc.guc.log, val);
}
DEFINE_SIMPLE_ATTRIBUTE(i915_guc_log_level_fops,
@@ -2155,14 +2029,16 @@ DEFINE_SIMPLE_ATTRIBUTE(i915_guc_log_level_fops,
static int i915_guc_log_relay_open(struct inode *inode, struct file *file)
{
- struct drm_i915_private *dev_priv = inode->i_private;
+ struct drm_i915_private *i915 = inode->i_private;
+ struct intel_guc *guc = &i915->gt.uc.guc;
+ struct intel_guc_log *log = &guc->log;
- if (!USES_GUC(dev_priv))
+ if (!intel_guc_is_running(guc))
return -ENODEV;
- file->private_data = &dev_priv->guc.log;
+ file->private_data = log;
- return intel_guc_log_relay_open(&dev_priv->guc.log);
+ return intel_guc_log_relay_open(log);
}
static ssize_t
@@ -2174,16 +2050,15 @@ i915_guc_log_relay_write(struct file *filp,
struct intel_guc_log *log = filp->private_data;
intel_guc_log_relay_flush(log);
-
return cnt;
}
static int i915_guc_log_relay_release(struct inode *inode, struct file *file)
{
- struct drm_i915_private *dev_priv = inode->i_private;
-
- intel_guc_log_relay_close(&dev_priv->guc.log);
+ struct drm_i915_private *i915 = inode->i_private;
+ struct intel_guc *guc = &i915->gt.uc.guc;
+ intel_guc_log_relay_close(&guc->log);
return 0;
}
@@ -2485,7 +2360,8 @@ static int i915_power_domain_info(struct seq_file *m, void *unused)
for_each_power_domain(power_domain, power_well->desc->domains)
seq_printf(m, " %-23s %d\n",
- intel_display_power_domain_str(power_domain),
+ intel_display_power_domain_str(dev_priv,
+ power_domain),
power_domains->domain_use_count[power_domain]);
}
@@ -2499,6 +2375,7 @@ static int i915_dmc_info(struct seq_file *m, void *unused)
struct drm_i915_private *dev_priv = node_to_i915(m->private);
intel_wakeref_t wakeref;
struct intel_csr *csr;
+ i915_reg_t dc5_reg, dc6_reg = {};
if (!HAS_CSR(dev_priv))
return -ENODEV;
@@ -2516,15 +2393,19 @@ static int i915_dmc_info(struct seq_file *m, void *unused)
seq_printf(m, "version: %d.%d\n", CSR_VERSION_MAJOR(csr->version),
CSR_VERSION_MINOR(csr->version));
- if (WARN_ON(INTEL_GEN(dev_priv) > 11))
- goto out;
+ if (INTEL_GEN(dev_priv) >= 12) {
+ dc5_reg = TGL_DMC_DEBUG_DC5_COUNT;
+ dc6_reg = TGL_DMC_DEBUG_DC6_COUNT;
+ } else {
+ dc5_reg = IS_BROXTON(dev_priv) ? BXT_CSR_DC3_DC5_COUNT :
+ SKL_CSR_DC3_DC5_COUNT;
+ if (!IS_GEN9_LP(dev_priv))
+ dc6_reg = SKL_CSR_DC5_DC6_COUNT;
+ }
- seq_printf(m, "DC3 -> DC5 count: %d\n",
- I915_READ(IS_BROXTON(dev_priv) ? BXT_CSR_DC3_DC5_COUNT :
- SKL_CSR_DC3_DC5_COUNT));
- if (!IS_GEN9_LP(dev_priv))
- seq_printf(m, "DC5 -> DC6 count: %d\n",
- I915_READ(SKL_CSR_DC5_DC6_COUNT));
+ seq_printf(m, "DC3 -> DC5 count: %d\n", I915_READ(dc5_reg));
+ if (dc6_reg.reg)
+ seq_printf(m, "DC5 -> DC6 count: %d\n", I915_READ(dc6_reg));
out:
seq_printf(m, "program base: 0x%08x\n", I915_READ(CSR_PROGRAM(0)));
@@ -2603,6 +2484,25 @@ static void intel_panel_info(struct seq_file *m, struct intel_panel *panel)
intel_seq_print_mode(m, 2, mode);
}
+static void intel_hdcp_info(struct seq_file *m,
+ struct intel_connector *intel_connector)
+{
+ bool hdcp_cap, hdcp2_cap;
+
+ hdcp_cap = intel_hdcp_capable(intel_connector);
+ hdcp2_cap = intel_hdcp2_capable(intel_connector);
+
+ if (hdcp_cap)
+ seq_puts(m, "HDCP1.4 ");
+ if (hdcp2_cap)
+ seq_puts(m, "HDCP2.2 ");
+
+ if (!hdcp_cap && !hdcp2_cap)
+ seq_puts(m, "None");
+
+ seq_puts(m, "\n");
+}
+
static void intel_dp_info(struct seq_file *m,
struct intel_connector *intel_connector)
{
@@ -2616,6 +2516,10 @@ static void intel_dp_info(struct seq_file *m,
drm_dp_downstream_debug(m, intel_dp->dpcd, intel_dp->downstream_ports,
&intel_dp->aux);
+ if (intel_connector->hdcp.shim) {
+ seq_puts(m, "\tHDCP version: ");
+ intel_hdcp_info(m, intel_connector);
+ }
}
static void intel_dp_mst_info(struct seq_file *m,
@@ -2639,6 +2543,10 @@ static void intel_hdmi_info(struct seq_file *m,
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&intel_encoder->base);
seq_printf(m, "\taudio support: %s\n", yesno(intel_hdmi->has_audio));
+ if (intel_connector->hdcp.shim) {
+ seq_puts(m, "\tHDCP version: ");
+ intel_hdcp_info(m, intel_connector);
+ }
}
static void intel_lvds_info(struct seq_file *m,
@@ -2874,7 +2782,6 @@ static int i915_engine_info(struct seq_file *m, void *unused)
struct drm_i915_private *dev_priv = node_to_i915(m->private);
struct intel_engine_cs *engine;
intel_wakeref_t wakeref;
- enum intel_engine_id id;
struct drm_printer p;
wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
@@ -2886,7 +2793,7 @@ static int i915_engine_info(struct seq_file *m, void *unused)
RUNTIME_INFO(dev_priv)->cs_timestamp_frequency_khz);
p = drm_seq_file_printer(m);
- for_each_engine(engine, dev_priv, id)
+ for_each_uabi_engine(engine, dev_priv)
intel_engine_dump(engine, &p, "%s\n", engine->name);
intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
@@ -2966,14 +2873,27 @@ static int i915_shared_dplls_info(struct seq_file *m, void *unused)
static int i915_wa_registers(struct seq_file *m, void *unused)
{
struct drm_i915_private *i915 = node_to_i915(m->private);
- const struct i915_wa_list *wal = &i915->engine[RCS0]->ctx_wa_list;
- struct i915_wa *wa;
- unsigned int i;
+ struct intel_engine_cs *engine;
+
+ for_each_uabi_engine(engine, i915) {
+ const struct i915_wa_list *wal = &engine->ctx_wa_list;
+ const struct i915_wa *wa;
+ unsigned int count;
- seq_printf(m, "Workarounds applied: %u\n", wal->count);
- for (i = 0, wa = wal->list; i < wal->count; i++, wa++)
- seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X\n",
- i915_mmio_reg_offset(wa->reg), wa->val, wa->mask);
+ count = wal->count;
+ if (!count)
+ continue;
+
+ seq_printf(m, "%s: Workarounds applied: %u\n",
+ engine->name, count);
+
+ for (wa = wal->list; count--; wa++)
+ seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X\n",
+ i915_mmio_reg_offset(wa->reg),
+ wa->val, wa->mask);
+
+ seq_printf(m, "\n");
+ }
return 0;
}
@@ -3620,7 +3540,8 @@ static const struct file_operations i915_cur_wm_latency_fops = {
static int
i915_wedged_get(void *data, u64 *val)
{
- int ret = i915_terminally_wedged(data);
+ struct drm_i915_private *i915 = data;
+ int ret = intel_gt_terminally_wedged(&i915->gt);
switch (ret) {
case -EIO:
@@ -3640,11 +3561,11 @@ i915_wedged_set(void *data, u64 val)
struct drm_i915_private *i915 = data;
/* Flush any previous reset before applying for a new one */
- wait_event(i915->gpu_error.reset_queue,
- !test_bit(I915_RESET_BACKOFF, &i915->gpu_error.flags));
+ wait_event(i915->gt.reset.queue,
+ !test_bit(I915_RESET_BACKOFF, &i915->gt.reset.flags));
- i915_handle_error(i915, val, I915_ERROR_CAPTURE,
- "Manually set wedged engine mask = %llx", val);
+ intel_gt_handle_error(&i915->gt, val, I915_ERROR_CAPTURE,
+ "Manually set wedged engine mask = %llx", val);
return 0;
}
@@ -3687,8 +3608,9 @@ i915_drop_caches_set(void *data, u64 val)
val, val & DROP_ALL);
if (val & DROP_RESET_ACTIVE &&
- wait_for(intel_engines_are_idle(i915), I915_IDLE_ENGINES_TIMEOUT))
- i915_gem_set_wedged(i915);
+ wait_for(intel_engines_are_idle(&i915->gt),
+ I915_IDLE_ENGINES_TIMEOUT))
+ intel_gt_set_wedged(&i915->gt);
/* No need to check and wait for gpu resets, only libdrm auto-restarts
* on ioctls on -EAGAIN. */
@@ -3721,10 +3643,13 @@ i915_drop_caches_set(void *data, u64 val)
i915_retire_requests(i915);
mutex_unlock(&i915->drm.struct_mutex);
+
+ if (ret == 0 && val & DROP_IDLE)
+ ret = intel_gt_pm_wait_for_idle(&i915->gt);
}
- if (val & DROP_RESET_ACTIVE && i915_terminally_wedged(i915))
- i915_handle_error(i915, ALL_ENGINES, 0, NULL);
+ if (val & DROP_RESET_ACTIVE && intel_gt_terminally_wedged(&i915->gt))
+ intel_gt_handle_error(&i915->gt, ALL_ENGINES, 0, NULL);
fs_reclaim_acquire(GFP_KERNEL);
if (val & DROP_BOUND)
@@ -4087,9 +4012,9 @@ static int i915_hpd_storm_ctl_show(struct seq_file *m, void *data)
/* Synchronize with everything first in case there's been an HPD
* storm, but we haven't finished handling it in the kernel yet
*/
- synchronize_irq(dev_priv->drm.irq);
+ intel_synchronize_irq(dev_priv);
flush_work(&dev_priv->hotplug.dig_port_work);
- flush_work(&dev_priv->hotplug.hotplug_work);
+ flush_delayed_work(&dev_priv->hotplug.hotplug_work);
seq_printf(m, "Threshold: %d\n", hotplug->hpd_storm_threshold);
seq_printf(m, "Detected: %s\n",
@@ -4370,7 +4295,6 @@ static const struct drm_info_list i915_debugfs_list[] = {
{"i915_gem_objects", i915_gem_object_info, 0},
{"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
{"i915_gem_interrupt", i915_interrupt_info, 0},
- {"i915_gem_batch_pool", i915_gem_batch_pool_info, 0},
{"i915_guc_info", i915_guc_info, 0},
{"i915_guc_load_status", i915_guc_load_status_info, 0},
{"i915_guc_log_dump", i915_guc_log_dump, 0},
@@ -4379,9 +4303,7 @@ static const struct drm_info_list i915_debugfs_list[] = {
{"i915_huc_load_status", i915_huc_load_status_info, 0},
{"i915_frequency_info", i915_frequency_info, 0},
{"i915_hangcheck_info", i915_hangcheck_info, 0},
- {"i915_reset_info", i915_reset_info, 0},
{"i915_drpc_info", i915_drpc_info, 0},
- {"i915_emon_status", i915_emon_status, 0},
{"i915_ring_freq_table", i915_ring_freq_table, 0},
{"i915_frontbuffer_tracking", i915_frontbuffer_tracking, 0},
{"i915_fbc_status", i915_fbc_status, 0},
@@ -4547,7 +4469,6 @@ static int i915_hdcp_sink_capability_show(struct seq_file *m, void *data)
{
struct drm_connector *connector = m->private;
struct intel_connector *intel_connector = to_intel_connector(connector);
- bool hdcp_cap, hdcp2_cap;
if (connector->status != connector_status_connected)
return -ENODEV;
@@ -4558,17 +4479,7 @@ static int i915_hdcp_sink_capability_show(struct seq_file *m, void *data)
seq_printf(m, "%s:%d HDCP version: ", connector->name,
connector->base.id);
- hdcp_cap = intel_hdcp_capable(intel_connector);
- hdcp2_cap = intel_hdcp2_capable(intel_connector);
-
- if (hdcp_cap)
- seq_puts(m, "HDCP1.4 ");
- if (hdcp2_cap)
- seq_puts(m, "HDCP2.2 ");
-
- if (!hdcp_cap && !hdcp2_cap)
- seq_puts(m, "None");
- seq_puts(m, "\n");
+ intel_hdcp_info(m, intel_connector);
return 0;
}
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index bac1ee94f63f..020696726f9e 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -51,6 +51,7 @@
#include "display/intel_audio.h"
#include "display/intel_bw.h"
#include "display/intel_cdclk.h"
+#include "display/intel_display_types.h"
#include "display/intel_dp.h"
#include "display/intel_fbdev.h"
#include "display/intel_gmbus.h"
@@ -61,436 +62,85 @@
#include "gem/i915_gem_context.h"
#include "gem/i915_gem_ioctls.h"
+#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"
-#include "gt/intel_reset.h"
-#include "gt/intel_workarounds.h"
#include "i915_debugfs.h"
#include "i915_drv.h"
#include "i915_irq.h"
-#include "i915_pmu.h"
+#include "i915_memcpy.h"
+#include "i915_perf.h"
#include "i915_query.h"
+#include "i915_suspend.h"
+#include "i915_sysfs.h"
#include "i915_trace.h"
#include "i915_vgpu.h"
#include "intel_csr.h"
-#include "intel_drv.h"
#include "intel_pm.h"
-#include "intel_uc.h"
static struct drm_driver driver;
-#if IS_ENABLED(CONFIG_DRM_I915_DEBUG)
-static unsigned int i915_load_fail_count;
-
-bool __i915_inject_load_failure(const char *func, int line)
-{
- if (i915_load_fail_count >= i915_modparams.inject_load_failure)
- return false;
-
- if (++i915_load_fail_count == i915_modparams.inject_load_failure) {
- DRM_INFO("Injecting failure at checkpoint %u [%s:%d]\n",
- i915_modparams.inject_load_failure, func, line);
- i915_modparams.inject_load_failure = 0;
- return true;
- }
-
- return false;
-}
-
-bool i915_error_injected(void)
-{
- return i915_load_fail_count && !i915_modparams.inject_load_failure;
-}
-
-#endif
-
-#define FDO_BUG_URL "https://bugs.freedesktop.org/enter_bug.cgi?product=DRI"
-#define FDO_BUG_MSG "Please file a bug at " FDO_BUG_URL " against DRM/Intel " \
- "providing the dmesg log by booting with drm.debug=0xf"
-
-void
-__i915_printk(struct drm_i915_private *dev_priv, const char *level,
- const char *fmt, ...)
-{
- static bool shown_bug_once;
- struct device *kdev = dev_priv->drm.dev;
- bool is_error = level[1] <= KERN_ERR[1];
- bool is_debug = level[1] == KERN_DEBUG[1];
- struct va_format vaf;
- va_list args;
-
- if (is_debug && !(drm_debug & DRM_UT_DRIVER))
- return;
-
- va_start(args, fmt);
-
- vaf.fmt = fmt;
- vaf.va = &args;
-
- if (is_error)
- dev_printk(level, kdev, "%pV", &vaf);
- else
- dev_printk(level, kdev, "[" DRM_NAME ":%ps] %pV",
- __builtin_return_address(0), &vaf);
-
- va_end(args);
-
- if (is_error && !shown_bug_once) {
- /*
- * Ask the user to file a bug report for the error, except
- * if they may have caused the bug by fiddling with unsafe
- * module parameters.
- */
- if (!test_taint(TAINT_USER))
- dev_notice(kdev, "%s", FDO_BUG_MSG);
- shown_bug_once = true;
- }
-}
-
-/* Map PCH device id to PCH type, or PCH_NONE if unknown. */
-static enum intel_pch
-intel_pch_type(const struct drm_i915_private *dev_priv, unsigned short id)
-{
- switch (id) {
- case INTEL_PCH_IBX_DEVICE_ID_TYPE:
- DRM_DEBUG_KMS("Found Ibex Peak PCH\n");
- WARN_ON(!IS_GEN(dev_priv, 5));
- return PCH_IBX;
- case INTEL_PCH_CPT_DEVICE_ID_TYPE:
- DRM_DEBUG_KMS("Found CougarPoint PCH\n");
- WARN_ON(!IS_GEN(dev_priv, 6) && !IS_IVYBRIDGE(dev_priv));
- return PCH_CPT;
- case INTEL_PCH_PPT_DEVICE_ID_TYPE:
- DRM_DEBUG_KMS("Found PantherPoint PCH\n");
- WARN_ON(!IS_GEN(dev_priv, 6) && !IS_IVYBRIDGE(dev_priv));
- /* PantherPoint is CPT compatible */
- return PCH_CPT;
- case INTEL_PCH_LPT_DEVICE_ID_TYPE:
- DRM_DEBUG_KMS("Found LynxPoint PCH\n");
- WARN_ON(!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv));
- WARN_ON(IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv));
- return PCH_LPT;
- case INTEL_PCH_LPT_LP_DEVICE_ID_TYPE:
- DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
- WARN_ON(!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv));
- WARN_ON(!IS_HSW_ULT(dev_priv) && !IS_BDW_ULT(dev_priv));
- return PCH_LPT;
- case INTEL_PCH_WPT_DEVICE_ID_TYPE:
- DRM_DEBUG_KMS("Found WildcatPoint PCH\n");
- WARN_ON(!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv));
- WARN_ON(IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv));
- /* WildcatPoint is LPT compatible */
- return PCH_LPT;
- case INTEL_PCH_WPT_LP_DEVICE_ID_TYPE:
- DRM_DEBUG_KMS("Found WildcatPoint LP PCH\n");
- WARN_ON(!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv));
- WARN_ON(!IS_HSW_ULT(dev_priv) && !IS_BDW_ULT(dev_priv));
- /* WildcatPoint is LPT compatible */
- return PCH_LPT;
- case INTEL_PCH_SPT_DEVICE_ID_TYPE:
- DRM_DEBUG_KMS("Found SunrisePoint PCH\n");
- WARN_ON(!IS_SKYLAKE(dev_priv) && !IS_KABYLAKE(dev_priv));
- return PCH_SPT;
- case INTEL_PCH_SPT_LP_DEVICE_ID_TYPE:
- DRM_DEBUG_KMS("Found SunrisePoint LP PCH\n");
- WARN_ON(!IS_SKYLAKE(dev_priv) && !IS_KABYLAKE(dev_priv));
- return PCH_SPT;
- case INTEL_PCH_KBP_DEVICE_ID_TYPE:
- DRM_DEBUG_KMS("Found Kaby Lake PCH (KBP)\n");
- WARN_ON(!IS_SKYLAKE(dev_priv) && !IS_KABYLAKE(dev_priv) &&
- !IS_COFFEELAKE(dev_priv));
- /* KBP is SPT compatible */
- return PCH_SPT;
- case INTEL_PCH_CNP_DEVICE_ID_TYPE:
- DRM_DEBUG_KMS("Found Cannon Lake PCH (CNP)\n");
- WARN_ON(!IS_CANNONLAKE(dev_priv) && !IS_COFFEELAKE(dev_priv));
- return PCH_CNP;
- case INTEL_PCH_CNP_LP_DEVICE_ID_TYPE:
- DRM_DEBUG_KMS("Found Cannon Lake LP PCH (CNP-LP)\n");
- WARN_ON(!IS_CANNONLAKE(dev_priv) && !IS_COFFEELAKE(dev_priv));
- return PCH_CNP;
- case INTEL_PCH_CMP_DEVICE_ID_TYPE:
- DRM_DEBUG_KMS("Found Comet Lake PCH (CMP)\n");
- WARN_ON(!IS_COFFEELAKE(dev_priv));
- /* CometPoint is CNP Compatible */
- return PCH_CNP;
- case INTEL_PCH_ICP_DEVICE_ID_TYPE:
- DRM_DEBUG_KMS("Found Ice Lake PCH\n");
- WARN_ON(!IS_ICELAKE(dev_priv));
- return PCH_ICP;
- case INTEL_PCH_MCC_DEVICE_ID_TYPE:
- DRM_DEBUG_KMS("Found Mule Creek Canyon PCH\n");
- WARN_ON(!IS_ELKHARTLAKE(dev_priv));
- return PCH_MCC;
- default:
- return PCH_NONE;
- }
-}
-
-static bool intel_is_virt_pch(unsigned short id,
- unsigned short svendor, unsigned short sdevice)
-{
- return (id == INTEL_PCH_P2X_DEVICE_ID_TYPE ||
- id == INTEL_PCH_P3X_DEVICE_ID_TYPE ||
- (id == INTEL_PCH_QEMU_DEVICE_ID_TYPE &&
- svendor == PCI_SUBVENDOR_ID_REDHAT_QUMRANET &&
- sdevice == PCI_SUBDEVICE_ID_QEMU));
-}
-
-static unsigned short
-intel_virt_detect_pch(const struct drm_i915_private *dev_priv)
-{
- unsigned short id = 0;
-
- /*
- * In a virtualized passthrough environment we can be in a
- * setup where the ISA bridge is not able to be passed through.
- * In this case, a south bridge can be emulated and we have to
- * make an educated guess as to which PCH is really there.
- */
-
- if (IS_ELKHARTLAKE(dev_priv))
- id = INTEL_PCH_MCC_DEVICE_ID_TYPE;
- else if (IS_ICELAKE(dev_priv))
- id = INTEL_PCH_ICP_DEVICE_ID_TYPE;
- else if (IS_CANNONLAKE(dev_priv) || IS_COFFEELAKE(dev_priv))
- id = INTEL_PCH_CNP_DEVICE_ID_TYPE;
- else if (IS_KABYLAKE(dev_priv) || IS_SKYLAKE(dev_priv))
- id = INTEL_PCH_SPT_DEVICE_ID_TYPE;
- else if (IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv))
- id = INTEL_PCH_LPT_LP_DEVICE_ID_TYPE;
- else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
- id = INTEL_PCH_LPT_DEVICE_ID_TYPE;
- else if (IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv))
- id = INTEL_PCH_CPT_DEVICE_ID_TYPE;
- else if (IS_GEN(dev_priv, 5))
- id = INTEL_PCH_IBX_DEVICE_ID_TYPE;
-
- if (id)
- DRM_DEBUG_KMS("Assuming PCH ID %04x\n", id);
- else
- DRM_DEBUG_KMS("Assuming no PCH\n");
-
- return id;
-}
-
-static void intel_detect_pch(struct drm_i915_private *dev_priv)
-{
- struct pci_dev *pch = NULL;
-
- /*
- * The reason to probe ISA bridge instead of Dev31:Fun0 is to
- * make graphics device passthrough work easy for VMM, that only
- * need to expose ISA bridge to let driver know the real hardware
- * underneath. This is a requirement from virtualization team.
- *
- * In some virtualized environments (e.g. XEN), there is irrelevant
- * ISA bridge in the system. To work reliably, we should scan through
- * all the ISA bridge devices and check for the first match, instead
- * of only checking the first one.
- */
- while ((pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, pch))) {
- unsigned short id;
- enum intel_pch pch_type;
-
- if (pch->vendor != PCI_VENDOR_ID_INTEL)
- continue;
-
- id = pch->device & INTEL_PCH_DEVICE_ID_MASK;
-
- pch_type = intel_pch_type(dev_priv, id);
- if (pch_type != PCH_NONE) {
- dev_priv->pch_type = pch_type;
- dev_priv->pch_id = id;
- break;
- } else if (intel_is_virt_pch(id, pch->subsystem_vendor,
- pch->subsystem_device)) {
- id = intel_virt_detect_pch(dev_priv);
- pch_type = intel_pch_type(dev_priv, id);
-
- /* Sanity check virtual PCH id */
- if (WARN_ON(id && pch_type == PCH_NONE))
- id = 0;
-
- dev_priv->pch_type = pch_type;
- dev_priv->pch_id = id;
- break;
- }
- }
-
- /*
- * Use PCH_NOP (PCH but no South Display) for PCH platforms without
- * display.
- */
- if (pch && !HAS_DISPLAY(dev_priv)) {
- DRM_DEBUG_KMS("Display disabled, reverting to NOP PCH\n");
- dev_priv->pch_type = PCH_NOP;
- dev_priv->pch_id = 0;
- }
-
- if (!pch)
- DRM_DEBUG_KMS("No PCH found.\n");
-
- pci_dev_put(pch);
-}
-
-static int i915_getparam_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct pci_dev *pdev = dev_priv->drm.pdev;
- const struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
- drm_i915_getparam_t *param = data;
- int value;
-
- switch (param->param) {
- case I915_PARAM_IRQ_ACTIVE:
- case I915_PARAM_ALLOW_BATCHBUFFER:
- case I915_PARAM_LAST_DISPATCH:
- case I915_PARAM_HAS_EXEC_CONSTANTS:
- /* Reject all old ums/dri params. */
- return -ENODEV;
- case I915_PARAM_CHIPSET_ID:
- value = pdev->device;
- break;
- case I915_PARAM_REVISION:
- value = pdev->revision;
- break;
- case I915_PARAM_NUM_FENCES_AVAIL:
- value = dev_priv->ggtt.num_fences;
- break;
- case I915_PARAM_HAS_OVERLAY:
- value = dev_priv->overlay ? 1 : 0;
- break;
- case I915_PARAM_HAS_BSD:
- value = !!dev_priv->engine[VCS0];
- break;
- case I915_PARAM_HAS_BLT:
- value = !!dev_priv->engine[BCS0];
- break;
- case I915_PARAM_HAS_VEBOX:
- value = !!dev_priv->engine[VECS0];
- break;
- case I915_PARAM_HAS_BSD2:
- value = !!dev_priv->engine[VCS1];
- break;
- case I915_PARAM_HAS_LLC:
- value = HAS_LLC(dev_priv);
- break;
- case I915_PARAM_HAS_WT:
- value = HAS_WT(dev_priv);
- break;
- case I915_PARAM_HAS_ALIASING_PPGTT:
- value = INTEL_PPGTT(dev_priv);
- break;
- case I915_PARAM_HAS_SEMAPHORES:
- value = !!(dev_priv->caps.scheduler & I915_SCHEDULER_CAP_SEMAPHORES);
- break;
- case I915_PARAM_HAS_SECURE_BATCHES:
- value = capable(CAP_SYS_ADMIN);
- break;
- case I915_PARAM_CMD_PARSER_VERSION:
- value = i915_cmd_parser_get_version(dev_priv);
- break;
- case I915_PARAM_SUBSLICE_TOTAL:
- value = intel_sseu_subslice_total(sseu);
- if (!value)
- return -ENODEV;
- break;
- case I915_PARAM_EU_TOTAL:
- value = sseu->eu_total;
- if (!value)
- return -ENODEV;
- break;
- case I915_PARAM_HAS_GPU_RESET:
- value = i915_modparams.enable_hangcheck &&
- intel_has_gpu_reset(dev_priv);
- if (value && intel_has_reset_engine(dev_priv))
- value = 2;
- break;
- case I915_PARAM_HAS_RESOURCE_STREAMER:
- value = 0;
- break;
- case I915_PARAM_HAS_POOLED_EU:
- value = HAS_POOLED_EU(dev_priv);
- break;
- case I915_PARAM_MIN_EU_IN_POOL:
- value = sseu->min_eu_in_pool;
- break;
- case I915_PARAM_HUC_STATUS:
- value = intel_huc_check_status(&dev_priv->huc);
- if (value < 0)
- return value;
- break;
- case I915_PARAM_MMAP_GTT_VERSION:
- /* Though we've started our numbering from 1, and so class all
- * earlier versions as 0, in effect their value is undefined as
- * the ioctl will report EINVAL for the unknown param!
- */
- value = i915_gem_mmap_gtt_version();
- break;
- case I915_PARAM_HAS_SCHEDULER:
- value = dev_priv->caps.scheduler;
- break;
-
- case I915_PARAM_MMAP_VERSION:
- /* Remember to bump this if the version changes! */
- case I915_PARAM_HAS_GEM:
- case I915_PARAM_HAS_PAGEFLIPPING:
- case I915_PARAM_HAS_EXECBUF2: /* depends on GEM */
- case I915_PARAM_HAS_RELAXED_FENCING:
- case I915_PARAM_HAS_COHERENT_RINGS:
- case I915_PARAM_HAS_RELAXED_DELTA:
- case I915_PARAM_HAS_GEN7_SOL_RESET:
- case I915_PARAM_HAS_WAIT_TIMEOUT:
- case I915_PARAM_HAS_PRIME_VMAP_FLUSH:
- case I915_PARAM_HAS_PINNED_BATCHES:
- case I915_PARAM_HAS_EXEC_NO_RELOC:
- case I915_PARAM_HAS_EXEC_HANDLE_LUT:
- case I915_PARAM_HAS_COHERENT_PHYS_GTT:
- case I915_PARAM_HAS_EXEC_SOFTPIN:
- case I915_PARAM_HAS_EXEC_ASYNC:
- case I915_PARAM_HAS_EXEC_FENCE:
- case I915_PARAM_HAS_EXEC_CAPTURE:
- case I915_PARAM_HAS_EXEC_BATCH_FIRST:
- case I915_PARAM_HAS_EXEC_FENCE_ARRAY:
- case I915_PARAM_HAS_EXEC_SUBMIT_FENCE:
- /* For the time being all of these are always true;
- * if some supported hardware does not have one of these
- * features this value needs to be provided from
- * INTEL_INFO(), a feature macro, or similar.
- */
- value = 1;
- break;
- case I915_PARAM_HAS_CONTEXT_ISOLATION:
- value = intel_engines_has_context_isolation(dev_priv);
- break;
- case I915_PARAM_SLICE_MASK:
- value = sseu->slice_mask;
- if (!value)
- return -ENODEV;
- break;
- case I915_PARAM_SUBSLICE_MASK:
- value = sseu->subslice_mask[0];
- if (!value)
- return -ENODEV;
- break;
- case I915_PARAM_CS_TIMESTAMP_FREQUENCY:
- value = 1000 * RUNTIME_INFO(dev_priv)->cs_timestamp_frequency_khz;
- break;
- case I915_PARAM_MMAP_GTT_COHERENT:
- value = INTEL_INFO(dev_priv)->has_coherent_ggtt;
- break;
- default:
- DRM_DEBUG("Unknown parameter %d\n", param->param);
- return -EINVAL;
- }
-
- if (put_user(value, param->value))
- return -EFAULT;
-
- return 0;
-}
+struct vlv_s0ix_state {
+ /* GAM */
+ u32 wr_watermark;
+ u32 gfx_prio_ctrl;
+ u32 arb_mode;
+ u32 gfx_pend_tlb0;
+ u32 gfx_pend_tlb1;
+ u32 lra_limits[GEN7_LRA_LIMITS_REG_NUM];
+ u32 media_max_req_count;
+ u32 gfx_max_req_count;
+ u32 render_hwsp;
+ u32 ecochk;
+ u32 bsd_hwsp;
+ u32 blt_hwsp;
+ u32 tlb_rd_addr;
+
+ /* MBC */
+ u32 g3dctl;
+ u32 gsckgctl;
+ u32 mbctl;
+
+ /* GCP */
+ u32 ucgctl1;
+ u32 ucgctl3;
+ u32 rcgctl1;
+ u32 rcgctl2;
+ u32 rstctl;
+ u32 misccpctl;
+
+ /* GPM */
+ u32 gfxpause;
+ u32 rpdeuhwtc;
+ u32 rpdeuc;
+ u32 ecobus;
+ u32 pwrdwnupctl;
+ u32 rp_down_timeout;
+ u32 rp_deucsw;
+ u32 rcubmabdtmr;
+ u32 rcedata;
+ u32 spare2gh;
+
+ /* Display 1 CZ domain */
+ u32 gt_imr;
+ u32 gt_ier;
+ u32 pm_imr;
+ u32 pm_ier;
+ u32 gt_scratch[GEN7_GT_SCRATCH_REG_NUM];
+
+ /* GT SA CZ domain */
+ u32 tilectl;
+ u32 gt_fifoctl;
+ u32 gtlc_wake_ctrl;
+ u32 gtlc_survive;
+ u32 pmwgicz;
+
+ /* Display 2 CZ domain */
+ u32 gu_ctl0;
+ u32 gu_ctl1;
+ u32 pcbr;
+ u32 clock_gate_dis2;
+};
static int i915_get_bridge_dev(struct drm_i915_private *dev_priv)
{
@@ -632,39 +282,45 @@ static unsigned int i915_vga_set_decode(void *cookie, bool state)
return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}
-static int i915_resume_switcheroo(struct drm_device *dev);
-static int i915_suspend_switcheroo(struct drm_device *dev, pm_message_t state);
+static int i915_resume_switcheroo(struct drm_i915_private *i915);
+static int i915_suspend_switcheroo(struct drm_i915_private *i915,
+ pm_message_t state);
static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
{
- struct drm_device *dev = pci_get_drvdata(pdev);
+ struct drm_i915_private *i915 = pdev_to_i915(pdev);
pm_message_t pmm = { .event = PM_EVENT_SUSPEND };
+ if (!i915) {
+ dev_err(&pdev->dev, "DRM not initialized, aborting switch.\n");
+ return;
+ }
+
if (state == VGA_SWITCHEROO_ON) {
pr_info("switched on\n");
- dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
+ i915->drm.switch_power_state = DRM_SWITCH_POWER_CHANGING;
/* i915 resume handler doesn't set to D0 */
pci_set_power_state(pdev, PCI_D0);
- i915_resume_switcheroo(dev);
- dev->switch_power_state = DRM_SWITCH_POWER_ON;
+ i915_resume_switcheroo(i915);
+ i915->drm.switch_power_state = DRM_SWITCH_POWER_ON;
} else {
pr_info("switched off\n");
- dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
- i915_suspend_switcheroo(dev, pmm);
- dev->switch_power_state = DRM_SWITCH_POWER_OFF;
+ i915->drm.switch_power_state = DRM_SWITCH_POWER_CHANGING;
+ i915_suspend_switcheroo(i915, pmm);
+ i915->drm.switch_power_state = DRM_SWITCH_POWER_OFF;
}
}
static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
{
- struct drm_device *dev = pci_get_drvdata(pdev);
+ struct drm_i915_private *i915 = pdev_to_i915(pdev);
/*
* FIXME: open_count is protected by drm_global_mutex but that would lead to
* locking inversion with the driver load path. And the access here is
* completely racy anyway. So don't bother with locking for now.
*/
- return dev->open_count == 0;
+ return i915 && i915->drm.open_count == 0;
}
static const struct vga_switcheroo_client_ops i915_switcheroo_ops = {
@@ -673,13 +329,13 @@ static const struct vga_switcheroo_client_ops i915_switcheroo_ops = {
.can_switch = i915_switcheroo_can_switch,
};
-static int i915_load_modeset_init(struct drm_device *dev)
+static int i915_driver_modeset_probe(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
struct pci_dev *pdev = dev_priv->drm.pdev;
int ret;
- if (i915_inject_load_failure())
+ if (i915_inject_probe_failure(dev_priv))
return -ENODEV;
if (HAS_DISPLAY(dev_priv)) {
@@ -749,16 +405,16 @@ static int i915_load_modeset_init(struct drm_device *dev)
cleanup_gem:
i915_gem_suspend(dev_priv);
- i915_gem_fini_hw(dev_priv);
- i915_gem_fini(dev_priv);
+ i915_gem_driver_remove(dev_priv);
+ i915_gem_driver_release(dev_priv);
cleanup_modeset:
- intel_modeset_cleanup(dev);
+ intel_modeset_driver_remove(dev);
cleanup_irq:
- drm_irq_uninstall(dev);
+ intel_irq_uninstall(dev_priv);
intel_gmbus_teardown(dev_priv);
cleanup_csr:
intel_csr_ucode_fini(dev_priv);
- intel_power_domains_fini_hw(dev_priv);
+ intel_power_domains_driver_remove(dev_priv);
vga_switcheroo_unregister_client(pdev);
cleanup_vga_client:
vga_client_register(pdev, NULL, NULL, NULL);
@@ -840,15 +496,6 @@ out_err:
return -ENOMEM;
}
-static void i915_engines_cleanup(struct drm_i915_private *i915)
-{
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
-
- for_each_engine(engine, i915, id)
- kfree(engine);
-}
-
static void i915_workqueues_cleanup(struct drm_i915_private *dev_priv)
{
destroy_workqueue(dev_priv->hotplug.dp_wq);
@@ -881,8 +528,31 @@ static void intel_detect_preproduction_hw(struct drm_i915_private *dev_priv)
}
}
+static int vlv_alloc_s0ix_state(struct drm_i915_private *i915)
+{
+ if (!IS_VALLEYVIEW(i915))
+ return 0;
+
+ /* we write all the values in the struct, so no need to zero it out */
+ i915->vlv_s0ix_state = kmalloc(sizeof(*i915->vlv_s0ix_state),
+ GFP_KERNEL);
+ if (!i915->vlv_s0ix_state)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void vlv_free_s0ix_state(struct drm_i915_private *i915)
+{
+ if (!i915->vlv_s0ix_state)
+ return;
+
+ kfree(i915->vlv_s0ix_state);
+ i915->vlv_s0ix_state = NULL;
+}
+
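/*
 * A minimal lifecycle sketch for the now heap-allocated vlv_s0ix_state
 * (hypothetical caller name; the real call sites are
 * i915_driver_early_probe() and i915_driver_late_release() below, and
 * the save/restore paths tolerate a NULL pointer on non-VLV platforms):
 */
static int example_vlv_s0ix_lifecycle(struct drm_i915_private *i915)
{
	int ret;

	ret = vlv_alloc_s0ix_state(i915);	/* no-op (returns 0) unless VLV */
	if (ret)
		return ret;			/* -ENOMEM if kmalloc() failed */

	vlv_save_gunit_s0ix_state(i915);	/* returns early when state is NULL */
	vlv_restore_gunit_s0ix_state(i915);	/* likewise NULL-safe */

	vlv_free_s0ix_state(i915);		/* kfree() and clear the pointer */
	return 0;
}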
/**
- * i915_driver_init_early - setup state not requiring device access
+ * i915_driver_early_probe - setup state not requiring device access
* @dev_priv: device private
*
* Initialize everything that is a "SW-only" state, that is state not
@@ -891,16 +561,17 @@ static void intel_detect_preproduction_hw(struct drm_i915_private *dev_priv)
* system memory allocation, setting up device specific attributes and
* function hooks not requiring accessing the device.
*/
-static int i915_driver_init_early(struct drm_i915_private *dev_priv)
+static int i915_driver_early_probe(struct drm_i915_private *dev_priv)
{
int ret = 0;
- if (i915_inject_load_failure())
+ if (i915_inject_probe_failure(dev_priv))
return -ENODEV;
intel_device_info_subplatform_init(dev_priv);
- intel_uncore_init_early(&dev_priv->uncore);
+ intel_uncore_mmio_debug_init_early(&dev_priv->mmio_debug);
+ intel_uncore_init_early(&dev_priv->uncore, dev_priv);
spin_lock_init(&dev_priv->irq_lock);
spin_lock_init(&dev_priv->gpu_error.lock);
@@ -920,24 +591,29 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv)
ret = i915_workqueues_init(dev_priv);
if (ret < 0)
- goto err_engines;
+ return ret;
- ret = i915_gem_init_early(dev_priv);
+ ret = vlv_alloc_s0ix_state(dev_priv);
if (ret < 0)
goto err_workqueues;
+ intel_wopcm_init_early(&dev_priv->wopcm);
+
+ intel_gt_init_early(&dev_priv->gt, dev_priv);
+
+ ret = i915_gem_init_early(dev_priv);
+ if (ret < 0)
+ goto err_gt;
+
/* This must be called before any calls to HAS_PCH_* */
intel_detect_pch(dev_priv);
- intel_wopcm_init_early(&dev_priv->wopcm);
- intel_uc_init_early(dev_priv);
intel_pm_setup(dev_priv);
intel_init_dpio(dev_priv);
ret = intel_power_domains_init(dev_priv);
if (ret < 0)
- goto err_uc;
+ goto err_gem;
intel_irq_init(dev_priv);
- intel_hangcheck_init(dev_priv);
intel_init_display_hooks(dev_priv);
intel_init_clock_gating_hooks(dev_priv);
intel_init_audio_hooks(dev_priv);
@@ -947,35 +623,36 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv)
return 0;
-err_uc:
- intel_uc_cleanup_early(dev_priv);
+err_gem:
i915_gem_cleanup_early(dev_priv);
+err_gt:
+ intel_gt_driver_late_release(&dev_priv->gt);
+ vlv_free_s0ix_state(dev_priv);
err_workqueues:
i915_workqueues_cleanup(dev_priv);
-err_engines:
- i915_engines_cleanup(dev_priv);
return ret;
}
/**
- * i915_driver_cleanup_early - cleanup the setup done in i915_driver_init_early()
+ * i915_driver_late_release - cleanup the setup done in
+ * i915_driver_early_probe()
* @dev_priv: device private
*/
-static void i915_driver_cleanup_early(struct drm_i915_private *dev_priv)
+static void i915_driver_late_release(struct drm_i915_private *dev_priv)
{
intel_irq_fini(dev_priv);
intel_power_domains_cleanup(dev_priv);
- intel_uc_cleanup_early(dev_priv);
i915_gem_cleanup_early(dev_priv);
+ intel_gt_driver_late_release(&dev_priv->gt);
+ vlv_free_s0ix_state(dev_priv);
i915_workqueues_cleanup(dev_priv);
- i915_engines_cleanup(dev_priv);
pm_qos_remove_request(&dev_priv->sb_qos);
mutex_destroy(&dev_priv->sb_lock);
}
/**
- * i915_driver_init_mmio - setup device MMIO
+ * i915_driver_mmio_probe - setup device MMIO
* @dev_priv: device private
*
* Setup minimal device state necessary for MMIO accesses later in the
@@ -983,11 +660,11 @@ static void i915_driver_cleanup_early(struct drm_i915_private *dev_priv)
* side effects or exposing the driver via kernel internal or user space
* interfaces.
*/
-static int i915_driver_init_mmio(struct drm_i915_private *dev_priv)
+static int i915_driver_mmio_probe(struct drm_i915_private *dev_priv)
{
int ret;
- if (i915_inject_load_failure())
+ if (i915_inject_probe_failure(dev_priv))
return -ENODEV;
if (i915_get_bridge_dev(dev_priv))
@@ -1004,7 +681,7 @@ static int i915_driver_init_mmio(struct drm_i915_private *dev_priv)
intel_uncore_prune_mmio_domains(&dev_priv->uncore);
- intel_uc_init_mmio(dev_priv);
+ intel_uc_init_mmio(&dev_priv->gt.uc);
ret = intel_engines_init_mmio(dev_priv);
if (ret)
@@ -1024,11 +701,12 @@ err_bridge:
}
/**
- * i915_driver_cleanup_mmio - cleanup the setup done in i915_driver_init_mmio()
+ * i915_driver_mmio_release - cleanup the setup done in i915_driver_mmio_probe()
* @dev_priv: device private
*/
-static void i915_driver_cleanup_mmio(struct drm_i915_private *dev_priv)
+static void i915_driver_mmio_release(struct drm_i915_private *dev_priv)
{
+ intel_engines_cleanup(dev_priv);
intel_teardown_mchbar(dev_priv);
intel_uncore_fini_mmio(&dev_priv->uncore);
pci_dev_put(dev_priv->bridge_dev);
@@ -1516,22 +1194,23 @@ static void edram_detect(struct drm_i915_private *dev_priv)
dev_priv->edram_size_mb =
gen9_edram_size_mb(dev_priv, edram_cap);
- DRM_INFO("Found %uMB of eDRAM\n", dev_priv->edram_size_mb);
+ dev_info(dev_priv->drm.dev,
+ "Found %uMB of eDRAM\n", dev_priv->edram_size_mb);
}
/**
- * i915_driver_init_hw - setup state requiring device access
+ * i915_driver_hw_probe - setup state requiring device access
* @dev_priv: device private
*
* Setup state that requires accessing the device, but doesn't require
* exposing the driver via kernel internal or userspace interfaces.
*/
-static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
+static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
{
struct pci_dev *pdev = dev_priv->drm.pdev;
int ret;
- if (i915_inject_load_failure())
+ if (i915_inject_probe_failure(dev_priv))
return -ENODEV;
intel_device_info_runtime_init(dev_priv);
@@ -1590,6 +1269,8 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
if (ret)
goto err_ggtt;
+ intel_gt_init_hw(dev_priv);
+
ret = i915_ggtt_enable_hw(dev_priv);
if (ret) {
DRM_ERROR("failed to enable GGTT\n");
@@ -1635,7 +1316,8 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY,
PM_QOS_DEFAULT_VALUE);
- intel_uncore_sanitize(dev_priv);
+ /* BIOS often leaves RC6 enabled, but disable it for hw init */
+ intel_sanitize_gt_powersave(dev_priv);
intel_gt_init_workarounds(dev_priv);
@@ -1683,17 +1365,17 @@ err_msi:
pci_disable_msi(pdev);
pm_qos_remove_request(&dev_priv->pm_qos);
err_ggtt:
- i915_ggtt_cleanup_hw(dev_priv);
+ i915_ggtt_driver_release(dev_priv);
err_perf:
i915_perf_fini(dev_priv);
return ret;
}
/**
- * i915_driver_cleanup_hw - cleanup the setup done in i915_driver_init_hw()
+ * i915_driver_hw_remove - cleanup the setup done in i915_driver_hw_probe()
* @dev_priv: device private
*/
-static void i915_driver_cleanup_hw(struct drm_i915_private *dev_priv)
+static void i915_driver_hw_remove(struct drm_i915_private *dev_priv)
{
struct pci_dev *pdev = dev_priv->drm.pdev;
@@ -1716,7 +1398,7 @@ static void i915_driver_register(struct drm_i915_private *dev_priv)
{
struct drm_device *dev = &dev_priv->drm;
- i915_gem_shrinker_register(dev_priv);
+ i915_gem_driver_register(dev_priv);
i915_pmu_register(dev_priv);
/*
@@ -1796,7 +1478,7 @@ static void i915_driver_unregister(struct drm_i915_private *dev_priv)
i915_teardown_sysfs(dev_priv);
drm_dev_unplug(&dev_priv->drm);
- i915_gem_shrinker_unregister(dev_priv);
+ i915_gem_driver_unregister(dev_priv);
}
static void i915_welcome_messages(struct drm_i915_private *dev_priv)
@@ -1843,9 +1525,10 @@ i915_driver_create(struct pci_dev *pdev, const struct pci_device_id *ent)
return ERR_PTR(err);
}
- i915->drm.pdev = pdev;
i915->drm.dev_private = i915;
- pci_set_drvdata(pdev, &i915->drm);
+
+ i915->drm.pdev = pdev;
+ pci_set_drvdata(pdev, i915);
/* Setup the write-once "constant" device info */
device_info = mkwrite_device_info(i915);
@@ -1869,17 +1552,17 @@ static void i915_driver_destroy(struct drm_i915_private *i915)
}
/**
- * i915_driver_load - setup chip and create an initial config
+ * i915_driver_probe - setup chip and create an initial config
* @pdev: PCI device
* @ent: matching PCI ID entry
*
- * The driver load routine has to do several things:
+ * The driver probe routine has to do several things:
* - drive output discovery via intel_modeset_init()
* - initialize the memory manager
* - allocate initial config memory
* - setup the DRM framebuffer with the allocated memory
*/
-int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent)
+int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
const struct intel_device_info *match_info =
(struct intel_device_info *)ent->driver_data;
@@ -1898,21 +1581,23 @@ int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent)
if (ret)
goto out_fini;
- ret = i915_driver_init_early(dev_priv);
+ ret = i915_driver_early_probe(dev_priv);
if (ret < 0)
goto out_pci_disable;
disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
- ret = i915_driver_init_mmio(dev_priv);
+ i915_detect_vgpu(dev_priv);
+
+ ret = i915_driver_mmio_probe(dev_priv);
if (ret < 0)
goto out_runtime_pm_put;
- ret = i915_driver_init_hw(dev_priv);
+ ret = i915_driver_hw_probe(dev_priv);
if (ret < 0)
goto out_cleanup_mmio;
- ret = i915_load_modeset_init(&dev_priv->drm);
+ ret = i915_driver_modeset_probe(&dev_priv->drm);
if (ret < 0)
goto out_cleanup_hw;
@@ -1925,66 +1610,68 @@ int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent)
return 0;
out_cleanup_hw:
- i915_driver_cleanup_hw(dev_priv);
- i915_ggtt_cleanup_hw(dev_priv);
+ i915_driver_hw_remove(dev_priv);
+ i915_ggtt_driver_release(dev_priv);
+
+ /* Paranoia: make sure we have disabled everything before we exit. */
+ intel_sanitize_gt_powersave(dev_priv);
out_cleanup_mmio:
- i915_driver_cleanup_mmio(dev_priv);
+ i915_driver_mmio_release(dev_priv);
out_runtime_pm_put:
enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
- i915_driver_cleanup_early(dev_priv);
+ i915_driver_late_release(dev_priv);
out_pci_disable:
pci_disable_device(pdev);
out_fini:
- i915_load_error(dev_priv, "Device initialization failed (%d)\n", ret);
+ i915_probe_error(dev_priv, "Device initialization failed (%d)\n", ret);
i915_driver_destroy(dev_priv);
return ret;
}
-void i915_driver_unload(struct drm_device *dev)
+void i915_driver_remove(struct drm_i915_private *i915)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct pci_dev *pdev = dev_priv->drm.pdev;
+ struct pci_dev *pdev = i915->drm.pdev;
- disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
+ disable_rpm_wakeref_asserts(&i915->runtime_pm);
- i915_driver_unregister(dev_priv);
+ i915_driver_unregister(i915);
/*
* After unregistering the device to prevent any new users, cancel
* all in-flight requests so that we can quickly unbind the active
* resources.
*/
- i915_gem_set_wedged(dev_priv);
+ intel_gt_set_wedged(&i915->gt);
/* Flush any external code that still may be under the RCU lock */
synchronize_rcu();
- i915_gem_suspend(dev_priv);
+ i915_gem_suspend(i915);
- drm_atomic_helper_shutdown(dev);
+ drm_atomic_helper_shutdown(&i915->drm);
- intel_gvt_cleanup(dev_priv);
+ intel_gvt_driver_remove(i915);
- intel_modeset_cleanup(dev);
+ intel_modeset_driver_remove(&i915->drm);
- intel_bios_cleanup(dev_priv);
+ intel_bios_driver_remove(i915);
vga_switcheroo_unregister_client(pdev);
vga_client_register(pdev, NULL, NULL, NULL);
- intel_csr_ucode_fini(dev_priv);
+ intel_csr_ucode_fini(i915);
/* Free error state after interrupts are fully disabled. */
- cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
- i915_reset_error_state(dev_priv);
+ cancel_delayed_work_sync(&i915->gt.hangcheck.work);
+ i915_reset_error_state(i915);
- i915_gem_fini_hw(dev_priv);
+ i915_gem_driver_remove(i915);
- intel_power_domains_fini_hw(dev_priv);
+ intel_power_domains_driver_remove(i915);
- i915_driver_cleanup_hw(dev_priv);
+ i915_driver_hw_remove(i915);
- enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
+ enable_rpm_wakeref_asserts(&i915->runtime_pm);
}
static void i915_driver_release(struct drm_device *dev)
@@ -1994,15 +1681,19 @@ static void i915_driver_release(struct drm_device *dev)
disable_rpm_wakeref_asserts(rpm);
- i915_gem_fini(dev_priv);
+ i915_gem_driver_release(dev_priv);
+
+ i915_ggtt_driver_release(dev_priv);
+
+ /* Paranoia: make sure we have disabled everything before we exit. */
+ intel_sanitize_gt_powersave(dev_priv);
- i915_ggtt_cleanup_hw(dev_priv);
- i915_driver_cleanup_mmio(dev_priv);
+ i915_driver_mmio_release(dev_priv);
enable_rpm_wakeref_asserts(rpm);
- intel_runtime_pm_cleanup(rpm);
+ intel_runtime_pm_driver_release(rpm);
- i915_driver_cleanup_early(dev_priv);
+ i915_driver_late_release(dev_priv);
i915_driver_destroy(dev_priv);
}
@@ -2046,6 +1737,9 @@ static void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
mutex_unlock(&dev->struct_mutex);
kfree(file_priv);
+
+ /* Catch up with all the deferred frees from "this" client */
+ i915_gem_flush_free_objects(to_i915(dev));
}
static void intel_suspend_encoders(struct drm_i915_private *dev_priv)
@@ -2150,7 +1844,7 @@ static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
struct drm_i915_private *dev_priv = to_i915(dev);
struct pci_dev *pdev = dev_priv->drm.pdev;
struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
- int ret;
+ int ret = 0;
disable_rpm_wakeref_asserts(rpm);
@@ -2161,12 +1855,9 @@ static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
intel_power_domains_suspend(dev_priv,
get_suspend_mode(dev_priv, hibernation));
- ret = 0;
- if (INTEL_GEN(dev_priv) >= 11 || IS_GEN9_LP(dev_priv))
- bxt_enable_dc9(dev_priv);
- else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
- hsw_enable_pc8(dev_priv);
- else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+ intel_display_power_suspend_late(dev_priv);
+
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
ret = vlv_suspend_complete(dev_priv);
if (ret) {
@@ -2194,34 +1885,29 @@ static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
out:
enable_rpm_wakeref_asserts(rpm);
- if (!dev_priv->uncore.user_forcewake.count)
- intel_runtime_pm_cleanup(rpm);
+ if (!dev_priv->uncore.user_forcewake_count)
+ intel_runtime_pm_driver_release(rpm);
return ret;
}
-static int i915_suspend_switcheroo(struct drm_device *dev, pm_message_t state)
+static int
+i915_suspend_switcheroo(struct drm_i915_private *i915, pm_message_t state)
{
int error;
- if (!dev) {
- DRM_ERROR("dev: %p\n", dev);
- DRM_ERROR("DRM not initialized, aborting suspend.\n");
- return -ENODEV;
- }
-
if (WARN_ON_ONCE(state.event != PM_EVENT_SUSPEND &&
state.event != PM_EVENT_FREEZE))
return -EINVAL;
- if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+ if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
- error = i915_drm_suspend(dev);
+ error = i915_drm_suspend(&i915->drm);
if (error)
return error;
- return i915_drm_suspend_late(dev, false);
+ return i915_drm_suspend_late(&i915->drm, false);
}
static int i915_drm_resume(struct drm_device *dev)
@@ -2354,75 +2040,68 @@ static int i915_drm_resume_early(struct drm_device *dev)
intel_uncore_resume_early(&dev_priv->uncore);
- i915_check_and_clear_faults(dev_priv);
+ intel_gt_check_and_clear_faults(&dev_priv->gt);
- if (INTEL_GEN(dev_priv) >= 11 || IS_GEN9_LP(dev_priv)) {
- gen9_sanitize_dc_state(dev_priv);
- bxt_disable_dc9(dev_priv);
- } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
- hsw_disable_pc8(dev_priv);
- }
+ intel_display_power_resume_early(dev_priv);
- intel_uncore_sanitize(dev_priv);
+ intel_sanitize_gt_powersave(dev_priv);
intel_power_domains_resume(dev_priv);
- intel_gt_sanitize(dev_priv, true);
+ intel_gt_sanitize(&dev_priv->gt, true);
enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
return ret;
}
-static int i915_resume_switcheroo(struct drm_device *dev)
+static int i915_resume_switcheroo(struct drm_i915_private *i915)
{
int ret;
- if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+ if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
- ret = i915_drm_resume_early(dev);
+ ret = i915_drm_resume_early(&i915->drm);
if (ret)
return ret;
- return i915_drm_resume(dev);
+ return i915_drm_resume(&i915->drm);
}
static int i915_pm_prepare(struct device *kdev)
{
- struct pci_dev *pdev = to_pci_dev(kdev);
- struct drm_device *dev = pci_get_drvdata(pdev);
+ struct drm_i915_private *i915 = kdev_to_i915(kdev);
- if (!dev) {
+ if (!i915) {
dev_err(kdev, "DRM not initialized, aborting suspend.\n");
return -ENODEV;
}
- if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+ if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
- return i915_drm_prepare(dev);
+ return i915_drm_prepare(&i915->drm);
}
static int i915_pm_suspend(struct device *kdev)
{
- struct pci_dev *pdev = to_pci_dev(kdev);
- struct drm_device *dev = pci_get_drvdata(pdev);
+ struct drm_i915_private *i915 = kdev_to_i915(kdev);
- if (!dev) {
+ if (!i915) {
dev_err(kdev, "DRM not initialized, aborting suspend.\n");
return -ENODEV;
}
- if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+ if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
- return i915_drm_suspend(dev);
+ return i915_drm_suspend(&i915->drm);
}
static int i915_pm_suspend_late(struct device *kdev)
{
- struct drm_device *dev = &kdev_to_i915(kdev)->drm;
+ struct drm_i915_private *i915 = kdev_to_i915(kdev);
/*
* We have a suspend ordering issue with the snd-hda driver also
@@ -2433,55 +2112,55 @@ static int i915_pm_suspend_late(struct device *kdev)
* FIXME: This should be solved with a special hdmi sink device or
* similar so that power domains can be employed.
*/
- if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+ if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
- return i915_drm_suspend_late(dev, false);
+ return i915_drm_suspend_late(&i915->drm, false);
}
static int i915_pm_poweroff_late(struct device *kdev)
{
- struct drm_device *dev = &kdev_to_i915(kdev)->drm;
+ struct drm_i915_private *i915 = kdev_to_i915(kdev);
- if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+ if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
- return i915_drm_suspend_late(dev, true);
+ return i915_drm_suspend_late(&i915->drm, true);
}
static int i915_pm_resume_early(struct device *kdev)
{
- struct drm_device *dev = &kdev_to_i915(kdev)->drm;
+ struct drm_i915_private *i915 = kdev_to_i915(kdev);
- if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+ if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
- return i915_drm_resume_early(dev);
+ return i915_drm_resume_early(&i915->drm);
}
static int i915_pm_resume(struct device *kdev)
{
- struct drm_device *dev = &kdev_to_i915(kdev)->drm;
+ struct drm_i915_private *i915 = kdev_to_i915(kdev);
- if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+ if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
- return i915_drm_resume(dev);
+ return i915_drm_resume(&i915->drm);
}
/* freeze: before creating the hibernation_image */
static int i915_pm_freeze(struct device *kdev)
{
- struct drm_device *dev = &kdev_to_i915(kdev)->drm;
+ struct drm_i915_private *i915 = kdev_to_i915(kdev);
int ret;
- if (dev->switch_power_state != DRM_SWITCH_POWER_OFF) {
- ret = i915_drm_suspend(dev);
+ if (i915->drm.switch_power_state != DRM_SWITCH_POWER_OFF) {
+ ret = i915_drm_suspend(&i915->drm);
if (ret)
return ret;
}
- ret = i915_gem_freeze(kdev_to_i915(kdev));
+ ret = i915_gem_freeze(i915);
if (ret)
return ret;
@@ -2490,16 +2169,16 @@ static int i915_pm_freeze(struct device *kdev)
static int i915_pm_freeze_late(struct device *kdev)
{
- struct drm_device *dev = &kdev_to_i915(kdev)->drm;
+ struct drm_i915_private *i915 = kdev_to_i915(kdev);
int ret;
- if (dev->switch_power_state != DRM_SWITCH_POWER_OFF) {
- ret = i915_drm_suspend_late(dev, true);
+ if (i915->drm.switch_power_state != DRM_SWITCH_POWER_OFF) {
+ ret = i915_drm_suspend_late(&i915->drm, true);
if (ret)
return ret;
}
- ret = i915_gem_freeze_late(kdev_to_i915(kdev));
+ ret = i915_gem_freeze_late(i915);
if (ret)
return ret;
@@ -2556,9 +2235,12 @@ static int i915_pm_restore(struct device *kdev)
*/
static void vlv_save_gunit_s0ix_state(struct drm_i915_private *dev_priv)
{
- struct vlv_s0ix_state *s = &dev_priv->vlv_s0ix_state;
+ struct vlv_s0ix_state *s = dev_priv->vlv_s0ix_state;
int i;
+ if (!s)
+ return;
+
/* GAM 0x4000-0x4770 */
s->wr_watermark = I915_READ(GEN7_WR_WATERMARK);
s->gfx_prio_ctrl = I915_READ(GEN7_GFX_PRIO_CTRL);
@@ -2637,10 +2319,13 @@ static void vlv_save_gunit_s0ix_state(struct drm_i915_private *dev_priv)
static void vlv_restore_gunit_s0ix_state(struct drm_i915_private *dev_priv)
{
- struct vlv_s0ix_state *s = &dev_priv->vlv_s0ix_state;
+ struct vlv_s0ix_state *s = dev_priv->vlv_s0ix_state;
u32 val;
int i;
+ if (!s)
+ return;
+
/* GAM 0x4000-0x4770 */
I915_WRITE(GEN7_WR_WATERMARK, s->wr_watermark);
I915_WRITE(GEN7_GFX_PRIO_CTRL, s->gfx_prio_ctrl);
@@ -2849,8 +2534,7 @@ static int vlv_suspend_complete(struct drm_i915_private *dev_priv)
if (err)
goto err2;
- if (!IS_CHERRYVIEW(dev_priv))
- vlv_save_gunit_s0ix_state(dev_priv);
+ vlv_save_gunit_s0ix_state(dev_priv);
err = vlv_force_gfx_clock(dev_priv, false);
if (err)
@@ -2880,8 +2564,7 @@ static int vlv_resume_prepare(struct drm_i915_private *dev_priv,
*/
ret = vlv_force_gfx_clock(dev_priv, true);
- if (!IS_CHERRYVIEW(dev_priv))
- vlv_restore_gunit_s0ix_state(dev_priv);
+ vlv_restore_gunit_s0ix_state(dev_priv);
err = vlv_allow_gt_wake(dev_priv, true);
if (!ret)
@@ -2901,11 +2584,9 @@ static int vlv_resume_prepare(struct drm_i915_private *dev_priv,
static int intel_runtime_suspend(struct device *kdev)
{
- struct pci_dev *pdev = to_pci_dev(kdev);
- struct drm_device *dev = pci_get_drvdata(pdev);
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
- int ret;
+ int ret = 0;
if (WARN_ON_ONCE(!(dev_priv->gt_pm.rc6.enabled && HAS_RC6(dev_priv))))
return -ENODEV;
@@ -2923,24 +2604,16 @@ static int intel_runtime_suspend(struct device *kdev)
*/
i915_gem_runtime_suspend(dev_priv);
- intel_uc_runtime_suspend(dev_priv);
+ intel_gt_runtime_suspend(&dev_priv->gt);
intel_runtime_pm_disable_interrupts(dev_priv);
intel_uncore_suspend(&dev_priv->uncore);
- ret = 0;
- if (INTEL_GEN(dev_priv) >= 11) {
- icl_display_core_uninit(dev_priv);
- bxt_enable_dc9(dev_priv);
- } else if (IS_GEN9_LP(dev_priv)) {
- bxt_display_core_uninit(dev_priv);
- bxt_enable_dc9(dev_priv);
- } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
- hsw_enable_pc8(dev_priv);
- } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+ intel_display_power_suspend(dev_priv);
+
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
ret = vlv_suspend_complete(dev_priv);
- }
if (ret) {
DRM_ERROR("Runtime suspend failed, disabling it (%d)\n", ret);
@@ -2948,9 +2621,8 @@ static int intel_runtime_suspend(struct device *kdev)
intel_runtime_pm_enable_interrupts(dev_priv);
- intel_uc_resume(dev_priv);
+ intel_gt_runtime_resume(&dev_priv->gt);
- i915_gem_init_swizzling(dev_priv);
i915_gem_restore_fences(dev_priv);
enable_rpm_wakeref_asserts(rpm);
@@ -2959,7 +2631,7 @@ static int intel_runtime_suspend(struct device *kdev)
}
enable_rpm_wakeref_asserts(rpm);
- intel_runtime_pm_cleanup(rpm);
+ intel_runtime_pm_driver_release(rpm);
if (intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore))
DRM_ERROR("Unclaimed access detected prior to suspending\n");
@@ -3000,9 +2672,7 @@ static int intel_runtime_suspend(struct device *kdev)
static int intel_runtime_resume(struct device *kdev)
{
- struct pci_dev *pdev = to_pci_dev(kdev);
- struct drm_device *dev = pci_get_drvdata(pdev);
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
int ret = 0;
@@ -3019,40 +2689,20 @@ static int intel_runtime_resume(struct device *kdev)
if (intel_uncore_unclaimed_mmio(&dev_priv->uncore))
DRM_DEBUG_DRIVER("Unclaimed access during suspend, bios?\n");
- if (INTEL_GEN(dev_priv) >= 11) {
- bxt_disable_dc9(dev_priv);
- icl_display_core_init(dev_priv, true);
- if (dev_priv->csr.dmc_payload) {
- if (dev_priv->csr.allowed_dc_mask &
- DC_STATE_EN_UPTO_DC6)
- skl_enable_dc6(dev_priv);
- else if (dev_priv->csr.allowed_dc_mask &
- DC_STATE_EN_UPTO_DC5)
- gen9_enable_dc5(dev_priv);
- }
- } else if (IS_GEN9_LP(dev_priv)) {
- bxt_disable_dc9(dev_priv);
- bxt_display_core_init(dev_priv, true);
- if (dev_priv->csr.dmc_payload &&
- (dev_priv->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC5))
- gen9_enable_dc5(dev_priv);
- } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
- hsw_disable_pc8(dev_priv);
- } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+ intel_display_power_resume(dev_priv);
+
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
ret = vlv_resume_prepare(dev_priv, true);
- }
intel_uncore_runtime_resume(&dev_priv->uncore);
intel_runtime_pm_enable_interrupts(dev_priv);
- intel_uc_resume(dev_priv);
-
/*
* No point of rolling back things in case of an error, as the best
* we can do is to hope that things will still work (and disable RPM).
*/
- i915_gem_init_swizzling(dev_priv);
+ intel_gt_runtime_resume(&dev_priv->gt);
i915_gem_restore_fences(dev_priv);
/*
@@ -3194,9 +2844,9 @@ static const struct drm_ioctl_desc i915_ioctls[] = {
DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_GETPARAM, i915_gem_context_getparam_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_SETPARAM, i915_gem_context_setparam_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_PERF_OPEN, i915_perf_open_ioctl, DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF_DRV(I915_PERF_ADD_CONFIG, i915_perf_add_config_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF_DRV(I915_PERF_REMOVE_CONFIG, i915_perf_remove_config_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF_DRV(I915_QUERY, i915_query_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_PERF_ADD_CONFIG, i915_perf_add_config_ioctl, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_PERF_REMOVE_CONFIG, i915_perf_remove_config_ioctl, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_QUERY, i915_query_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_VM_CREATE, i915_gem_vm_create_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_VM_DESTROY, i915_gem_vm_destroy_ioctl, DRM_RENDER_ALLOW),
};
@@ -3206,7 +2856,7 @@ static struct drm_driver driver = {
* deal with them for Intel hardware.
*/
.driver_features =
- DRIVER_GEM | DRIVER_PRIME |
+ DRIVER_GEM |
DRIVER_RENDER | DRIVER_MODESET | DRIVER_ATOMIC | DRIVER_SYNCOBJ,
.release = i915_driver_release,
.open = i915_driver_open,
@@ -3222,6 +2872,9 @@ static struct drm_driver driver = {
.gem_prime_export = i915_gem_prime_export,
.gem_prime_import = i915_gem_prime_import,
+ .get_vblank_timestamp = drm_calc_vbltimestamp_from_scanoutpos,
+ .get_scanout_position = i915_get_crtc_scanoutpos,
+
.dumb_create = i915_gem_dumb_create,
.dumb_map_offset = i915_gem_mmap_gtt,
.ioctls = i915_ioctls,
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index fe7a6ec2c199..772154e4073e 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -43,7 +43,7 @@
#include <linux/mm_types.h>
#include <linux/perf_event.h>
#include <linux/pm_qos.h>
-#include <linux/reservation.h>
+#include <linux/dma-resv.h>
#include <linux/shmem_fs.h>
#include <linux/stackdepot.h>
@@ -68,28 +68,35 @@
#include "display/intel_display_power.h"
#include "display/intel_dpll_mgr.h"
#include "display/intel_frontbuffer.h"
+#include "display/intel_gmbus.h"
#include "display/intel_opregion.h"
+#include "gem/i915_gem_context_types.h"
+#include "gem/i915_gem_shrinker.h"
+#include "gem/i915_gem_stolen.h"
+
#include "gt/intel_lrc.h"
#include "gt/intel_engine.h"
+#include "gt/intel_gt_types.h"
#include "gt/intel_workarounds.h"
+#include "gt/uc/intel_uc.h"
#include "intel_device_info.h"
+#include "intel_pch.h"
#include "intel_runtime_pm.h"
-#include "intel_uc.h"
#include "intel_uncore.h"
#include "intel_wakeref.h"
#include "intel_wopcm.h"
#include "i915_gem.h"
-#include "gem/i915_gem_context_types.h"
#include "i915_gem_fence_reg.h"
#include "i915_gem_gtt.h"
#include "i915_gpu_error.h"
#include "i915_request.h"
#include "i915_scheduler.h"
-#include "i915_timeline.h"
+#include "gt/intel_timeline.h"
#include "i915_vma.h"
+#include "i915_irq.h"
#include "intel_gvt.h"
@@ -98,45 +105,8 @@
#define DRIVER_NAME "i915"
#define DRIVER_DESC "Intel Graphics"
-#define DRIVER_DATE "20190619"
-#define DRIVER_TIMESTAMP 1560947544
-
-/* Use I915_STATE_WARN(x) and I915_STATE_WARN_ON() (rather than WARN() and
- * WARN_ON()) for hw state sanity checks to check for unexpected conditions
- * which may not necessarily be a user visible problem. This will either
- * WARN() or DRM_ERROR() depending on the verbose_checks moduleparam, to
- * enable distros and users to tailor their preferred amount of i915 abrt
- * spam.
- */
-#define I915_STATE_WARN(condition, format...) ({ \
- int __ret_warn_on = !!(condition); \
- if (unlikely(__ret_warn_on)) \
- if (!WARN(i915_modparams.verbose_state_checks, format)) \
- DRM_ERROR(format); \
- unlikely(__ret_warn_on); \
-})
-
-#define I915_STATE_WARN_ON(x) \
- I915_STATE_WARN((x), "%s", "WARN_ON(" __stringify(x) ")")
-
-#if IS_ENABLED(CONFIG_DRM_I915_DEBUG)
-
-bool __i915_inject_load_failure(const char *func, int line);
-#define i915_inject_load_failure() \
- __i915_inject_load_failure(__func__, __LINE__)
-
-bool i915_error_injected(void);
-
-#else
-
-#define i915_inject_load_failure() false
-#define i915_error_injected() false
-
-#endif
-
-#define i915_load_error(i915, fmt, ...) \
- __i915_printk(i915, i915_error_injected() ? KERN_DEBUG : KERN_ERR, \
- fmt, ##__VA_ARGS__)
+#define DRIVER_DATE "20190822"
+#define DRIVER_TIMESTAMP 1566477988
struct drm_i915_gem_object;
@@ -152,6 +122,10 @@ enum hpd_pin {
HPD_PORT_D,
HPD_PORT_E,
HPD_PORT_F,
+ HPD_PORT_G,
+ HPD_PORT_H,
+ HPD_PORT_I,
+
HPD_NUM_PINS
};
@@ -162,7 +136,7 @@ enum hpd_pin {
#define HPD_STORM_DEFAULT_THRESHOLD 50
struct i915_hotplug {
- struct work_struct hotplug_work;
+ struct delayed_work hotplug_work;
struct {
unsigned long last_jiffies;
@@ -174,6 +148,7 @@ struct i915_hotplug {
} state;
} stats[HPD_NUM_PINS];
u32 event_bits;
+ u32 retry_bits;
struct delayed_work reenable_work;
u32 long_port_mask;
@@ -286,14 +261,14 @@ struct drm_i915_display_funcs {
enum pipe pipe);
int (*get_fifo_size)(struct drm_i915_private *dev_priv,
enum i9xx_plane_id i9xx_plane);
- int (*compute_pipe_wm)(struct intel_crtc_state *cstate);
- int (*compute_intermediate_wm)(struct intel_crtc_state *newstate);
+ int (*compute_pipe_wm)(struct intel_crtc_state *crtc_state);
+ int (*compute_intermediate_wm)(struct intel_crtc_state *crtc_state);
void (*initial_watermarks)(struct intel_atomic_state *state,
- struct intel_crtc_state *cstate);
+ struct intel_crtc_state *crtc_state);
void (*atomic_update_watermarks)(struct intel_atomic_state *state,
- struct intel_crtc_state *cstate);
+ struct intel_crtc_state *crtc_state);
void (*optimize_watermarks)(struct intel_atomic_state *state,
- struct intel_crtc_state *cstate);
+ struct intel_crtc_state *crtc_state);
int (*compute_global_watermarks)(struct intel_atomic_state *state);
void (*update_wm)(struct intel_crtc *crtc);
int (*modeset_calc_cdclk)(struct intel_atomic_state *state);
@@ -306,10 +281,10 @@ struct drm_i915_display_funcs {
int (*crtc_compute_clock)(struct intel_crtc *crtc,
struct intel_crtc_state *crtc_state);
void (*crtc_enable)(struct intel_crtc_state *pipe_config,
- struct drm_atomic_state *old_state);
+ struct intel_atomic_state *old_state);
void (*crtc_disable)(struct intel_crtc_state *old_crtc_state,
- struct drm_atomic_state *old_state);
- void (*update_crtcs)(struct drm_atomic_state *state);
+ struct intel_atomic_state *old_state);
+ void (*update_crtcs)(struct intel_atomic_state *state);
void (*audio_codec_enable)(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state);
@@ -519,24 +494,6 @@ struct i915_psr {
u16 su_x_granularity;
};
-/*
- * Sorted by south display engine compatibility.
- * If the new PCH comes with a south display engine that is not
- * inherited from the latest item, please do not add it to the
- * end. Instead, add it right after its "parent" PCH.
- */
-enum intel_pch {
- PCH_NOP = -1, /* PCH without south display */
- PCH_NONE = 0, /* No PCH present */
- PCH_IBX, /* Ibexpeak PCH */
- PCH_CPT, /* Cougarpoint/Pantherpoint PCH */
- PCH_LPT, /* Lynxpoint/Wildcatpoint PCH */
- PCH_SPT, /* Sunrisepoint/Kaby Lake PCH */
- PCH_CNP, /* Cannon/Comet Lake PCH */
- PCH_ICP, /* Ice Lake PCH */
- PCH_MCC, /* Mule Creek Canyon PCH */
-};
-
#define QUIRK_LVDS_SSC_DISABLE (1<<1)
#define QUIRK_INVERT_BRIGHTNESS (1<<2)
#define QUIRK_BACKLIGHT_PRESENT (1<<3)
@@ -570,67 +527,7 @@ struct i915_suspend_saved_registers {
u16 saveGCDGMBUS;
};
-struct vlv_s0ix_state {
- /* GAM */
- u32 wr_watermark;
- u32 gfx_prio_ctrl;
- u32 arb_mode;
- u32 gfx_pend_tlb0;
- u32 gfx_pend_tlb1;
- u32 lra_limits[GEN7_LRA_LIMITS_REG_NUM];
- u32 media_max_req_count;
- u32 gfx_max_req_count;
- u32 render_hwsp;
- u32 ecochk;
- u32 bsd_hwsp;
- u32 blt_hwsp;
- u32 tlb_rd_addr;
-
- /* MBC */
- u32 g3dctl;
- u32 gsckgctl;
- u32 mbctl;
-
- /* GCP */
- u32 ucgctl1;
- u32 ucgctl3;
- u32 rcgctl1;
- u32 rcgctl2;
- u32 rstctl;
- u32 misccpctl;
-
- /* GPM */
- u32 gfxpause;
- u32 rpdeuhwtc;
- u32 rpdeuc;
- u32 ecobus;
- u32 pwrdwnupctl;
- u32 rp_down_timeout;
- u32 rp_deucsw;
- u32 rcubmabdtmr;
- u32 rcedata;
- u32 spare2gh;
-
- /* Display 1 CZ domain */
- u32 gt_imr;
- u32 gt_ier;
- u32 pm_imr;
- u32 pm_ier;
- u32 gt_scratch[GEN7_GT_SCRATCH_REG_NUM];
-
- /* GT SA CZ domain */
- u32 tilectl;
- u32 gt_fifoctl;
- u32 gtlc_wake_ctrl;
- u32 gtlc_survive;
- u32 pmwgicz;
-
- /* Display 2 CZ domain */
- u32 gu_ctl0;
- u32 gu_ctl1;
- u32 pcbr;
- u32 clock_gate_dis2;
-};
+struct vlv_s0ix_state;
struct intel_rps_ei {
ktime_t ktime;
@@ -764,7 +661,6 @@ struct i915_gem_mm {
*/
struct llist_head free_list;
struct work_struct free_work;
- spinlock_t free_lock;
/**
* Count of objects pending destructions. Used to skip needlessly
* waiting on an RCU barrier if no objects are waiting to be freed.
@@ -781,9 +677,6 @@ struct i915_gem_mm {
*/
struct vfsmount *gemfs;
- /** PPGTT used for aliasing the PPGTT with the GTT */
- struct i915_ppgtt *aliasing_ppgtt;
-
struct notifier_block oom_notifier;
struct notifier_block vmap_notifier;
struct shrinker shrinker;
@@ -795,11 +688,6 @@ struct i915_gem_mm {
*/
struct workqueue_struct *userptr_wq;
- u64 unordered_timeline;
-
- /* the indicator for dispatch video commands on two BSD rings */
- atomic_t bsd_engine_dispatch_index;
-
/** Bit 6 swizzling required for X tiling */
u32 bit_6_swizzle_x;
/** Bit 6 swizzling required for Y tiling */
@@ -1073,6 +961,7 @@ struct i915_frontbuffer_tracking {
};
struct i915_virtual_gpu {
+ struct mutex lock; /* serialises sending of g2v_notify command pkts */
bool active;
u32 caps;
};
@@ -1235,6 +1124,86 @@ struct i915_perf_stream {
* @oa_config: The OA configuration used by the stream.
*/
struct i915_oa_config *oa_config;
+
+ /**
+ * The OA context specific information.
+ */
+ struct intel_context *pinned_ctx;
+ u32 specific_ctx_id;
+ u32 specific_ctx_id_mask;
+
+ struct hrtimer poll_check_timer;
+ wait_queue_head_t poll_wq;
+ bool pollin;
+
+ bool periodic;
+ int period_exponent;
+
+ /**
+ * State of the OA buffer.
+ */
+ struct {
+ struct i915_vma *vma;
+ u8 *vaddr;
+ u32 last_ctx_id;
+ int format;
+ int format_size;
+ int size_exponent;
+
+ /**
+ * Locks reads and writes to all head/tail state
+ *
+ * Consider: the head and tail pointer state needs to be read
+ * consistently from a hrtimer callback (atomic context) and
+ * read() fop (user context) with tail pointer updates happening
+ * in atomic context and head updates in user context and the
+ * (unlikely) possibility of read() errors needing to reset all
+ * head/tail state.
+ *
+ * Note: Contention/performance aren't currently a significant
+ * concern here considering the relatively low frequency of
+ * hrtimer callbacks (5ms period) and that reads typically only
+ * happen in response to a hrtimer event and likely complete
+ * before the next callback.
+ *
+ * Note: This lock is not held *while* reading and copying data
+ * to userspace so the value of head observed in hrtimer
+ * callbacks won't represent any partial consumption of data.
+ */
+ spinlock_t ptr_lock;
+
+ /**
+ * One 'aging' tail pointer and one 'aged' tail pointer ready to be
+ * used for reading.
+ *
+ * Initial values of 0xffffffff are invalid and imply that an
+ * update is required (and should be ignored by an attempted
+ * read)
+ */
+ struct {
+ u32 offset;
+ } tails[2];
+
+ /**
+ * Index for the aged tail ready to read() data up to.
+ */
+ unsigned int aged_tail_idx;
+
+ /**
+ * A monotonic timestamp for when the current aging tail pointer
+ * was read; used to determine when it is old enough to trust.
+ */
+ u64 aging_timestamp;
+
+ /**
+ * Although we can always read back the head pointer register,
+ * we prefer to avoid trusting the HW state, just to avoid any
+ * risk that some hardware condition could somehow bump the
+ * head pointer unpredictably and cause us to forward the wrong
+ * OA buffer data to userspace.
+ */
+ u32 head;
+ } oa_buffer;
};
/**
@@ -1272,7 +1241,7 @@ struct i915_oa_ops {
* @disable_metric_set: Remove system constraints associated with using
* the OA unit.
*/
- void (*disable_metric_set)(struct drm_i915_private *dev_priv);
+ void (*disable_metric_set)(struct i915_perf_stream *stream);
/**
* @oa_enable: Enable periodic sampling
@@ -1300,7 +1269,7 @@ struct i915_oa_ops {
* handling the OA unit tail pointer race that affects multiple
* generations.
*/
- u32 (*oa_hw_tail_read)(struct drm_i915_private *dev_priv);
+ u32 (*oa_hw_tail_read)(struct i915_perf_stream *stream);
};
struct intel_cdclk_state {
@@ -1340,6 +1309,7 @@ struct drm_i915_private {
resource_size_t stolen_usable_size; /* Total size minus reserved ranges */
struct intel_uncore uncore;
+ struct intel_uncore_mmio_debug mmio_debug;
struct i915_virtual_gpu vgpu;
@@ -1347,9 +1317,6 @@ struct drm_i915_private {
struct intel_wopcm wopcm;
- struct intel_huc huc;
- struct intel_guc guc;
-
struct intel_csr csr;
struct intel_gmbus gmbus[GMBUS_NUM_PINS];
@@ -1374,13 +1341,12 @@ struct drm_i915_private {
wait_queue_head_t gmbus_wait_queue;
struct pci_dev *bridge_dev;
- struct intel_engine_cs *engine[I915_NUM_ENGINES];
+
/* Context used internally to idle the GPU and setup initial state */
struct i915_gem_context *kernel_context;
- /* Context only to be used for injecting preemption commands */
- struct i915_gem_context *preempt_context;
- struct intel_engine_cs *engine_class[MAX_ENGINE_CLASS + 1]
- [MAX_ENGINE_INSTANCE + 1];
+
+ struct intel_engine_cs *engine[I915_NUM_ENGINES];
+ struct rb_root uabi_engines;
struct resource mch_res;
@@ -1401,11 +1367,7 @@ struct drm_i915_private {
u32 irq_mask;
u32 de_irq_mask[I915_MAX_PIPES];
};
- u32 gt_irq_mask;
- u32 pm_imr;
- u32 pm_ier;
u32 pm_rps_events;
- u32 pm_guc_events;
u32 pipestat_irq_mask[I915_MAX_PIPES];
struct i915_hotplug hotplug;
@@ -1422,9 +1384,6 @@ struct drm_i915_private {
/* backlight registers and fields in struct intel_panel */
struct mutex backlight_lock;
- /* LVDS info */
- bool no_aux_handshake;
-
/* protects panel power sequencer state */
struct mutex pps_mutex;
@@ -1488,8 +1447,6 @@ struct drm_i915_private {
DECLARE_HASHTABLE(mm_structs, 7);
struct mutex mm_lock;
- struct intel_ppat ppat;
-
/* Kernel Modesetting */
struct intel_crtc *plane_to_crtc_mapping[I915_MAX_PIPES];
@@ -1586,6 +1543,8 @@ struct drm_i915_private {
#define MAX_CONTEXT_HW_ID (1<<21) /* exclusive */
#define MAX_GUC_CONTEXT_HW_ID (1 << 20) /* exclusive */
#define GEN11_MAX_CONTEXT_HW_ID (1<<11) /* exclusive */
+/* in Gen12 ID 0x7FF is reserved to indicate idle */
+#define GEN12_MAX_CONTEXT_HW_ID (GEN11_MAX_CONTEXT_HW_ID - 1)
struct list_head hw_id_list;
} contexts;
@@ -1604,7 +1563,7 @@ struct drm_i915_private {
u32 suspend_count;
bool power_domains_suspended;
struct i915_suspend_saved_registers regfile;
- struct vlv_s0ix_state vlv_s0ix_state;
+ struct vlv_s0ix_state *vlv_s0ix_state;
enum {
I915_SAGV_UNKNOWN = 0,
@@ -1645,7 +1604,7 @@ struct drm_i915_private {
/*
* Should be held around atomic WM register writing; also
* protects intel_crtc->wm.active and
- * cstate->wm.need_postvbl_update.
+ * crtc_state->wm.need_postvbl_update.
*/
struct mutex wm_mutex;
@@ -1708,155 +1667,39 @@ struct drm_i915_private {
struct mutex lock;
struct list_head streams;
- struct {
- /*
- * The stream currently using the OA unit. If accessed
- * outside a syscall associated to its file
- * descriptor, you need to hold
- * dev_priv->drm.struct_mutex.
- */
- struct i915_perf_stream *exclusive_stream;
-
- struct intel_context *pinned_ctx;
- u32 specific_ctx_id;
- u32 specific_ctx_id_mask;
-
- struct hrtimer poll_check_timer;
- wait_queue_head_t poll_wq;
- bool pollin;
-
- /**
- * For rate limiting any notifications of spurious
- * invalid OA reports
- */
- struct ratelimit_state spurious_report_rs;
-
- bool periodic;
- int period_exponent;
-
- struct i915_oa_config test_config;
-
- struct {
- struct i915_vma *vma;
- u8 *vaddr;
- u32 last_ctx_id;
- int format;
- int format_size;
-
- /**
- * Locks reads and writes to all head/tail state
- *
- * Consider: the head and tail pointer state
- * needs to be read consistently from a hrtimer
- * callback (atomic context) and read() fop
- * (user context) with tail pointer updates
- * happening in atomic context and head updates
- * in user context and the (unlikely)
- * possibility of read() errors needing to
- * reset all head/tail state.
- *
- * Note: Contention or performance aren't
- * currently a significant concern here
- * considering the relatively low frequency of
- * hrtimer callbacks (5ms period) and that
- * reads typically only happen in response to a
- * hrtimer event and likely complete before the
- * next callback.
- *
- * Note: This lock is not held *while* reading
- * and copying data to userspace so the value
- * of head observed in htrimer callbacks won't
- * represent any partial consumption of data.
- */
- spinlock_t ptr_lock;
-
- /**
- * One 'aging' tail pointer and one 'aged'
- * tail pointer ready to used for reading.
- *
- * Initial values of 0xffffffff are invalid
- * and imply that an update is required
- * (and should be ignored by an attempted
- * read)
- */
- struct {
- u32 offset;
- } tails[2];
-
- /**
- * Index for the aged tail ready to read()
- * data up to.
- */
- unsigned int aged_tail_idx;
-
- /**
- * A monotonic timestamp for when the current
- * aging tail pointer was read; used to
- * determine when it is old enough to trust.
- */
- u64 aging_timestamp;
-
- /**
- * Although we can always read back the head
- * pointer register, we prefer to avoid
- * trusting the HW state, just to avoid any
- * risk that some hardware condition could
- * somehow bump the head pointer unpredictably
- * and cause us to forward the wrong OA buffer
- * data to userspace.
- */
- u32 head;
- } oa_buffer;
-
- u32 gen7_latched_oastatus1;
- u32 ctx_oactxctrl_offset;
- u32 ctx_flexeu0_offset;
-
- /**
- * The RPT_ID/reason field for Gen8+ includes a bit
- * to determine if the CTX ID in the report is valid
- * but the specific bit differs between Gen 8 and 9
- */
- u32 gen8_valid_ctx_bit;
-
- struct i915_oa_ops ops;
- const struct i915_oa_format *oa_formats;
- } oa;
- } perf;
-
- /* Abstract the submission mechanism (legacy ringbuffer or execlists) away */
- struct {
- struct i915_gt_timelines {
- struct mutex mutex; /* protects list, tainted by GPU */
- struct list_head active_list;
-
- /* Pack multiple timelines' seqnos into the same page */
- spinlock_t hwsp_lock;
- struct list_head hwsp_free_list;
- } timelines;
+ /*
+ * The stream currently using the OA unit. If accessed
+ * outside a syscall associated with its file
+ * descriptor, you need to hold
+ * dev_priv->drm.struct_mutex.
+ */
+ struct i915_perf_stream *exclusive_stream;
- struct list_head active_rings;
+ /**
+ * For rate limiting any notifications of spurious
+ * invalid OA reports
+ */
+ struct ratelimit_state spurious_report_rs;
- struct intel_wakeref wakeref;
+ struct i915_oa_config test_config;
- struct list_head closed_vma;
- spinlock_t closed_lock; /* guards the list of closed_vma */
+ u32 gen7_latched_oastatus1;
+ u32 ctx_oactxctrl_offset;
+ u32 ctx_flexeu0_offset;
/**
- * Is the GPU currently considered idle, or busy executing
- * userspace requests? Whilst idle, we allow runtime power
- * management to power down the hardware and display clocks.
- * In order to reduce the effect on performance, there
- * is a slight delay before we do so.
+ * The RPT_ID/reason field for Gen8+ includes a bit
+ * to determine if the CTX ID in the report is valid
+ * but the specific bit differs between Gen 8 and 9
*/
- intel_wakeref_t awake;
-
- struct blocking_notifier_head pm_notifications;
+ u32 gen8_valid_ctx_bit;
- ktime_t last_init_time;
+ struct i915_oa_ops ops;
+ const struct i915_oa_format *oa_formats;
+ } perf;
- struct i915_vma *scratch;
- } gt;
+ /* Abstract the submission mechanism (legacy ringbuffer or execlists) away */
+ struct intel_gt gt;
struct {
struct notifier_block pm_notifier;
@@ -1933,27 +1776,12 @@ static inline struct drm_i915_private *to_i915(const struct drm_device *dev)
static inline struct drm_i915_private *kdev_to_i915(struct device *kdev)
{
- return to_i915(dev_get_drvdata(kdev));
+ return dev_get_drvdata(kdev);
}
-static inline struct drm_i915_private *wopcm_to_i915(struct intel_wopcm *wopcm)
+static inline struct drm_i915_private *pdev_to_i915(struct pci_dev *pdev)
{
- return container_of(wopcm, struct drm_i915_private, wopcm);
-}
-
-static inline struct drm_i915_private *guc_to_i915(struct intel_guc *guc)
-{
- return container_of(guc, struct drm_i915_private, guc);
-}
-
-static inline struct drm_i915_private *huc_to_i915(struct intel_huc *huc)
-{
- return container_of(huc, struct drm_i915_private, huc);
-}
-
-static inline struct drm_i915_private *uncore_to_i915(struct intel_uncore *uncore)
-{
- return container_of(uncore, struct drm_i915_private, uncore);
+ return pci_get_drvdata(pdev);
}
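/*
 * A minimal sketch (hypothetical helper name) of the new drvdata
 * convention: pci_set_drvdata() now stores the i915 pointer directly,
 * so both accessors above are plain dereferences with no detour
 * through struct drm_device.
 */
static bool example_drvdata_roundtrip(struct pci_dev *pdev,
				      struct drm_i915_private *i915)
{
	pci_set_drvdata(pdev, i915);	/* as done in i915_driver_create() */

	return pdev_to_i915(pdev) == i915 &&
	       kdev_to_i915(&pdev->dev) == i915;
}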
/* Simple iterator over all initialised engines */
@@ -1970,12 +1798,13 @@ static inline struct drm_i915_private *uncore_to_i915(struct intel_uncore *uncor
((engine__) = (dev_priv__)->engine[__mask_next_bit(tmp__)]), 1 : \
0;)
-enum hdmi_force_audio {
- HDMI_AUDIO_OFF_DVI = -2, /* no aux data for HDMI-DVI converter */
- HDMI_AUDIO_OFF, /* force turn off HDMI audio */
- HDMI_AUDIO_AUTO, /* trust EDID */
- HDMI_AUDIO_ON, /* force turn on HDMI audio */
-};
+#define rb_to_uabi_engine(rb) \
+ rb_entry_safe(rb, struct intel_engine_cs, uabi_node)
+
+#define for_each_uabi_engine(engine__, i915__) \
+ for ((engine__) = rb_to_uabi_engine(rb_first(&(i915__)->uabi_engines));\
+ (engine__); \
+ (engine__) = rb_to_uabi_engine(rb_next(&(engine__)->uabi_node)))
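/*
 * A minimal usage sketch for the new uabi_engines iterator; the
 * function name and loop body are illustrative only.
 */
static void example_print_uabi_engines(struct drm_i915_private *i915)
{
	struct intel_engine_cs *engine;

	for_each_uabi_engine(engine, i915)
		DRM_DEBUG("uabi engine %s\n", engine->name);
}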
#define I915_GTT_OFFSET_NONE ((u32)-1)
@@ -2127,6 +1956,7 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
#define IS_CANNONLAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_CANNONLAKE)
#define IS_ICELAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_ICELAKE)
#define IS_ELKHARTLAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_ELKHARTLAKE)
+#define IS_TIGERLAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_TIGERLAKE)
#define IS_HSW_EARLY_SDV(dev_priv) (IS_HASWELL(dev_priv) && \
(INTEL_DEVID(dev_priv) & 0xFF00) == 0x0C00)
#define IS_BDW_ULT(dev_priv) \
@@ -2323,63 +2153,16 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
#define HAS_IPC(dev_priv) (INTEL_INFO(dev_priv)->display.has_ipc)
-/*
- * For now, anything with a GuC requires uCode loading, and then supports
- * command submission once loaded. But these are logically independent
- * properties, so we have separate macros to test them.
- */
-#define HAS_GUC(dev_priv) (INTEL_INFO(dev_priv)->has_guc)
-#define HAS_GUC_UCODE(dev_priv) (HAS_GUC(dev_priv))
-#define HAS_GUC_SCHED(dev_priv) (HAS_GUC(dev_priv))
+#define HAS_GT_UC(dev_priv) (INTEL_INFO(dev_priv)->has_gt_uc)
-/* For now, anything with a GuC has also HuC */
-#define HAS_HUC(dev_priv) (HAS_GUC(dev_priv))
-#define HAS_HUC_UCODE(dev_priv) (HAS_GUC(dev_priv))
-
-/* Having a GuC is not the same as using a GuC */
-#define USES_GUC(dev_priv) intel_uc_is_using_guc(dev_priv)
-#define USES_GUC_SUBMISSION(dev_priv) intel_uc_is_using_guc_submission(dev_priv)
-#define USES_HUC(dev_priv) intel_uc_is_using_huc(dev_priv)
+/* Having GuC is not the same as using GuC */
+#define USES_GUC(dev_priv) intel_uc_uses_guc(&(dev_priv)->gt.uc)
+#define USES_GUC_SUBMISSION(dev_priv) intel_uc_uses_guc_submission(&(dev_priv)->gt.uc)
#define HAS_POOLED_EU(dev_priv) (INTEL_INFO(dev_priv)->has_pooled_eu)
-#define INTEL_PCH_DEVICE_ID_MASK 0xff80
-#define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00
-#define INTEL_PCH_CPT_DEVICE_ID_TYPE 0x1c00
-#define INTEL_PCH_PPT_DEVICE_ID_TYPE 0x1e00
-#define INTEL_PCH_LPT_DEVICE_ID_TYPE 0x8c00
-#define INTEL_PCH_LPT_LP_DEVICE_ID_TYPE 0x9c00
-#define INTEL_PCH_WPT_DEVICE_ID_TYPE 0x8c80
-#define INTEL_PCH_WPT_LP_DEVICE_ID_TYPE 0x9c80
-#define INTEL_PCH_SPT_DEVICE_ID_TYPE 0xA100
-#define INTEL_PCH_SPT_LP_DEVICE_ID_TYPE 0x9D00
-#define INTEL_PCH_KBP_DEVICE_ID_TYPE 0xA280
-#define INTEL_PCH_CNP_DEVICE_ID_TYPE 0xA300
-#define INTEL_PCH_CNP_LP_DEVICE_ID_TYPE 0x9D80
-#define INTEL_PCH_CMP_DEVICE_ID_TYPE 0x0280
-#define INTEL_PCH_ICP_DEVICE_ID_TYPE 0x3480
-#define INTEL_PCH_MCC_DEVICE_ID_TYPE 0x4B00
-#define INTEL_PCH_P2X_DEVICE_ID_TYPE 0x7100
-#define INTEL_PCH_P3X_DEVICE_ID_TYPE 0x7000
-#define INTEL_PCH_QEMU_DEVICE_ID_TYPE 0x2900 /* qemu q35 has 2918 */
-
-#define INTEL_PCH_TYPE(dev_priv) ((dev_priv)->pch_type)
-#define INTEL_PCH_ID(dev_priv) ((dev_priv)->pch_id)
-#define HAS_PCH_MCC(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_MCC)
-#define HAS_PCH_ICP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_ICP)
-#define HAS_PCH_CNP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_CNP)
-#define HAS_PCH_SPT(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_SPT)
-#define HAS_PCH_LPT(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_LPT)
-#define HAS_PCH_LPT_LP(dev_priv) \
- (INTEL_PCH_ID(dev_priv) == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE || \
- INTEL_PCH_ID(dev_priv) == INTEL_PCH_WPT_LP_DEVICE_ID_TYPE)
-#define HAS_PCH_LPT_H(dev_priv) \
- (INTEL_PCH_ID(dev_priv) == INTEL_PCH_LPT_DEVICE_ID_TYPE || \
- INTEL_PCH_ID(dev_priv) == INTEL_PCH_WPT_DEVICE_ID_TYPE)
-#define HAS_PCH_CPT(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_CPT)
-#define HAS_PCH_IBX(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_IBX)
-#define HAS_PCH_NOP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_NOP)
-#define HAS_PCH_SPLIT(dev_priv) (INTEL_PCH_TYPE(dev_priv) != PCH_NONE)
+#define HAS_GLOBAL_MOCS_REGISTERS(dev_priv) (INTEL_INFO(dev_priv)->has_global_mocs)
+
#define HAS_GMCH(dev_priv) (INTEL_INFO(dev_priv)->display.has_gmch)
@@ -2395,8 +2178,6 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
#define HAS_DISPLAY(dev_priv) (INTEL_INFO(dev_priv)->num_pipes > 0)
-#include "i915_trace.h"
-
static inline bool intel_vtd_active(void)
{
#ifdef CONFIG_INTEL_IOMMU
@@ -2418,48 +2199,19 @@ intel_ggtt_update_needs_vtd_wa(struct drm_i915_private *dev_priv)
}
/* i915_drv.c */
-void __printf(3, 4)
-__i915_printk(struct drm_i915_private *dev_priv, const char *level,
- const char *fmt, ...);
-
-#define i915_report_error(dev_priv, fmt, ...) \
- __i915_printk(dev_priv, KERN_ERR, fmt, ##__VA_ARGS__)
-
#ifdef CONFIG_COMPAT
-extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
- unsigned long arg);
+long i915_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
#else
#define i915_compat_ioctl NULL
#endif
extern const struct dev_pm_ops i915_pm_ops;
-extern int i915_driver_load(struct pci_dev *pdev,
- const struct pci_device_id *ent);
-extern void i915_driver_unload(struct drm_device *dev);
+int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
+void i915_driver_remove(struct drm_i915_private *i915);
-extern void intel_engine_init_hangcheck(struct intel_engine_cs *engine);
-extern void intel_hangcheck_init(struct drm_i915_private *dev_priv);
+void intel_engine_init_hangcheck(struct intel_engine_cs *engine);
int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool on);
-u32 intel_calculate_mcr_s_ss_select(struct drm_i915_private *dev_priv);
-
-static inline void i915_queue_hangcheck(struct drm_i915_private *dev_priv)
-{
- unsigned long delay;
-
- if (unlikely(!i915_modparams.enable_hangcheck))
- return;
-
- /* Don't continually defer the hangcheck so that it is always run at
- * least once after work has been scheduled on any ring. Otherwise,
- * we will ignore a hung ring if a second ring is kept busy.
- */
-
- delay = round_jiffies_up_relative(DRM_I915_HANGCHECK_JIFFIES);
- queue_delayed_work(system_long_wq,
- &dev_priv->gpu_error.hangcheck_work, delay);
-}
-
static inline bool intel_gvt_active(struct drm_i915_private *dev_priv)
{
return dev_priv->gvt;
@@ -2470,6 +2222,9 @@ static inline bool intel_vgpu_active(struct drm_i915_private *dev_priv)
return dev_priv->vgpu.active;
}
+int i915_getparam_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+
/* i915_gem.c */
int i915_gem_init_userptr(struct drm_i915_private *dev_priv);
void i915_gem_cleanup_userptr(struct drm_i915_private *dev_priv);
@@ -2481,18 +2236,17 @@ int i915_gem_freeze_late(struct drm_i915_private *dev_priv);
static inline void i915_gem_drain_freed_objects(struct drm_i915_private *i915)
{
- if (!atomic_read(&i915->mm.free_count))
- return;
-
- /* A single pass should suffice to release all the freed objects (along
+ /*
+ * A single pass should suffice to release all the freed objects (along
+ * most call paths), but be a little more paranoid in that freeing
+ * the objects does take a small amount of time, during which the RCU
* callbacks could have added new objects into the freed list, and
* armed the work again.
*/
- do {
+ while (atomic_read(&i915->mm.free_count)) {
+ flush_work(&i915->mm.free_work);
rcu_barrier();
- } while (flush_work(&i915->mm.free_work));
+ }
}
static inline void i915_gem_drain_workqueue(struct drm_i915_private *i915)
@@ -2510,6 +2264,7 @@ static inline void i915_gem_drain_workqueue(struct drm_i915_private *i915)
*/
int pass = 3;
do {
+ flush_workqueue(i915->wq);
rcu_barrier();
i915_gem_drain_freed_objects(i915);
} while (--pass);
@@ -2523,7 +2278,9 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
u64 alignment,
u64 flags);
-int i915_gem_object_unbind(struct drm_i915_gem_object *obj);
+int i915_gem_object_unbind(struct drm_i915_gem_object *obj,
+ unsigned long flags);
+#define I915_GEM_OBJECT_UNBIND_ACTIVE BIT(0)
void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv);
@@ -2540,42 +2297,26 @@ int i915_gem_mmap_gtt(struct drm_file *file_priv, struct drm_device *dev,
u32 handle, u64 *offset);
int i915_gem_mmap_gtt_version(void);
-void i915_gem_track_fb(struct drm_i915_gem_object *old,
- struct drm_i915_gem_object *new,
- unsigned frontbuffer_bits);
-
int __must_check i915_gem_set_global_seqno(struct drm_device *dev, u32 seqno);
-static inline bool __i915_wedged(struct i915_gpu_error *error)
-{
- return unlikely(test_bit(I915_WEDGED, &error->flags));
-}
-
-static inline bool i915_reset_failed(struct drm_i915_private *i915)
-{
- return __i915_wedged(&i915->gpu_error);
-}
-
static inline u32 i915_reset_count(struct i915_gpu_error *error)
{
- return READ_ONCE(error->reset_count);
+ return atomic_read(&error->reset_count);
}
static inline u32 i915_reset_engine_count(struct i915_gpu_error *error,
struct intel_engine_cs *engine)
{
- return READ_ONCE(error->reset_engine_count[engine->id]);
+ return atomic_read(&error->reset_engine_count[engine->uabi_class]);
}
-void i915_gem_set_wedged(struct drm_i915_private *dev_priv);
-bool i915_gem_unset_wedged(struct drm_i915_private *dev_priv);
-
void i915_gem_init_mmio(struct drm_i915_private *i915);
int __must_check i915_gem_init(struct drm_i915_private *dev_priv);
int __must_check i915_gem_init_hw(struct drm_i915_private *dev_priv);
-void i915_gem_init_swizzling(struct drm_i915_private *dev_priv);
-void i915_gem_fini_hw(struct drm_i915_private *dev_priv);
-void i915_gem_fini(struct drm_i915_private *dev_priv);
+void i915_gem_driver_register(struct drm_i915_private *i915);
+void i915_gem_driver_unregister(struct drm_i915_private *i915);
+void i915_gem_driver_remove(struct drm_i915_private *dev_priv);
+void i915_gem_driver_release(struct drm_i915_private *dev_priv);
int i915_gem_wait_for_idle(struct drm_i915_private *dev_priv,
unsigned int flags, long timeout);
void i915_gem_suspend(struct drm_i915_private *dev_priv);
@@ -2592,8 +2333,7 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
struct dma_buf *dma_buf);
-struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
- struct drm_gem_object *gem_obj, int flags);
+struct dma_buf *i915_gem_prime_export(struct drm_gem_object *gem_obj, int flags);
static inline struct i915_gem_context *
__i915_gem_context_lookup_rcu(struct drm_i915_file_private *file_priv, u32 id)
@@ -2615,16 +2355,6 @@ i915_gem_context_lookup(struct drm_i915_file_private *file_priv, u32 id)
return ctx;
}
-int i915_perf_open_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file);
-int i915_perf_add_config_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file);
-int i915_perf_remove_config_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file);
-void i915_oa_init_reg_state(struct intel_engine_cs *engine,
- struct intel_context *ce,
- u32 *reg_state);
-
/* i915_gem_evict.c */
int __must_check i915_gem_evict_something(struct i915_address_space *vm,
u64 min_size, u64 alignment,
@@ -2636,59 +2366,11 @@ int __must_check i915_gem_evict_for_node(struct i915_address_space *vm,
unsigned int flags);
int i915_gem_evict_vm(struct i915_address_space *vm);
-void i915_gem_flush_ggtt_writes(struct drm_i915_private *dev_priv);
-
-/* belongs in i915_gem_gtt.h */
-static inline void i915_gem_chipset_flush(struct drm_i915_private *dev_priv)
-{
- wmb();
- if (INTEL_GEN(dev_priv) < 6)
- intel_gtt_chipset_flush();
-}
-
-/* i915_gem_stolen.c */
-int i915_gem_stolen_insert_node(struct drm_i915_private *dev_priv,
- struct drm_mm_node *node, u64 size,
- unsigned alignment);
-int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *dev_priv,
- struct drm_mm_node *node, u64 size,
- unsigned alignment, u64 start,
- u64 end);
-void i915_gem_stolen_remove_node(struct drm_i915_private *dev_priv,
- struct drm_mm_node *node);
-int i915_gem_init_stolen(struct drm_i915_private *dev_priv);
-void i915_gem_cleanup_stolen(struct drm_i915_private *dev_priv);
-struct drm_i915_gem_object *
-i915_gem_object_create_stolen(struct drm_i915_private *dev_priv,
- resource_size_t size);
-struct drm_i915_gem_object *
-i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv,
- resource_size_t stolen_offset,
- resource_size_t gtt_offset,
- resource_size_t size);
-
/* i915_gem_internal.c */
struct drm_i915_gem_object *
i915_gem_object_create_internal(struct drm_i915_private *dev_priv,
phys_addr_t size);
-/* i915_gem_shrinker.c */
-unsigned long i915_gem_shrink(struct drm_i915_private *i915,
- unsigned long target,
- unsigned long *nr_scanned,
- unsigned flags);
-#define I915_SHRINK_UNBOUND BIT(0)
-#define I915_SHRINK_BOUND BIT(1)
-#define I915_SHRINK_ACTIVE BIT(2)
-#define I915_SHRINK_VMAPS BIT(3)
-#define I915_SHRINK_WRITEBACK BIT(4)
-
-unsigned long i915_gem_shrink_all(struct drm_i915_private *i915);
-void i915_gem_shrinker_register(struct drm_i915_private *i915);
-void i915_gem_shrinker_unregister(struct drm_i915_private *i915);
-void i915_gem_shrinker_taints_mutex(struct drm_i915_private *i915,
- struct mutex *mutex);
-
/* i915_gem_tiling.c */
static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
{
@@ -2716,20 +2398,6 @@ int intel_engine_cmd_parser(struct intel_engine_cs *engine,
u32 batch_len,
bool is_master);
-/* i915_perf.c */
-extern void i915_perf_init(struct drm_i915_private *dev_priv);
-extern void i915_perf_fini(struct drm_i915_private *dev_priv);
-extern void i915_perf_register(struct drm_i915_private *dev_priv);
-extern void i915_perf_unregister(struct drm_i915_private *dev_priv);
-
-/* i915_suspend.c */
-extern int i915_save_state(struct drm_i915_private *dev_priv);
-extern int i915_restore_state(struct drm_i915_private *dev_priv);
-
-/* i915_sysfs.c */
-void i915_setup_sysfs(struct drm_i915_private *dev_priv);
-void i915_teardown_sysfs(struct drm_i915_private *dev_priv);
-
/* intel_device_info.c */
static inline struct intel_device_info *
mkwrite_device_info(struct drm_i915_private *dev_priv)
@@ -2737,25 +2405,9 @@ mkwrite_device_info(struct drm_i915_private *dev_priv)
return (struct intel_device_info *)INTEL_INFO(dev_priv);
}
-/* modesetting */
-extern void intel_modeset_init_hw(struct drm_device *dev);
-extern int intel_modeset_init(struct drm_device *dev);
-extern void intel_modeset_cleanup(struct drm_device *dev);
-extern int intel_modeset_vga_set_state(struct drm_i915_private *dev_priv,
- bool state);
-extern void intel_display_resume(struct drm_device *dev);
-extern void i915_redisable_vga(struct drm_i915_private *dev_priv);
-extern void i915_redisable_vga_power_on(struct drm_i915_private *dev_priv);
-extern void intel_init_pch_refclk(struct drm_i915_private *dev_priv);
-
int i915_reg_read_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
-extern struct intel_display_error_state *
-intel_display_capture_error_state(struct drm_i915_private *dev_priv);
-extern void intel_display_print_error_state(struct drm_i915_error_state_buf *e,
- struct intel_display_error_state *error);
-
#define __I915_REG_OP(op__, dev_priv__, ...) \
intel_uncore_##op__(&(dev_priv__)->uncore, __VA_ARGS__)
@@ -2793,29 +2445,19 @@ extern void intel_display_print_error_state(struct drm_i915_error_state_buf *e,
#define I915_READ_FW(reg__) __I915_REG_OP(read_fw, dev_priv, (reg__))
#define I915_WRITE_FW(reg__, val__) __I915_REG_OP(write_fw, dev_priv, (reg__), (val__))
-/* "Broadcast RGB" property */
-#define INTEL_BROADCAST_RGB_AUTO 0
-#define INTEL_BROADCAST_RGB_FULL 1
-#define INTEL_BROADCAST_RGB_LIMITED 2
-
-void i915_memcpy_init_early(struct drm_i915_private *dev_priv);
-bool i915_memcpy_from_wc(void *dst, const void *src, unsigned long len);
+/* register wait wrappers for display regs */
+#define intel_de_wait_for_register(dev_priv_, reg_, mask_, value_, timeout_) \
+ intel_wait_for_register(&(dev_priv_)->uncore, \
+ (reg_), (mask_), (value_), (timeout_))
-/* The movntdqa instructions used for memcpy-from-wc require 16-byte alignment,
- * as well as SSE4.1 support. i915_memcpy_from_wc() will report if it cannot
- * perform the operation. To check beforehand, pass in the parameters to
- * to i915_can_memcpy_from_wc() - since we only care about the low 4 bits,
- * you only need to pass in the minor offsets, page-aligned pointers are
- * always valid.
- *
- * For just checking for SSE4.1, in the foreknowledge that the future use
- * will be correctly aligned, just use i915_has_memcpy_from_wc().
- */
-#define i915_can_memcpy_from_wc(dst, src, len) \
- i915_memcpy_from_wc((void *)((unsigned long)(dst) | (unsigned long)(src) | (len)), NULL, 0)
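/*
 * A hedged illustration of the check being removed above: only the low
 * 4 bits of each operand matter for the 16-byte alignment test, so
 * dst | src | len are folded into one fake pointer and probed with a
 * zero-length copy before committing to the movntdqa path.
 *
 *	if (i915_can_memcpy_from_wc(dst, src, len))
 *		i915_memcpy_from_wc(dst, src, len);	// SSE4.1 fast path
 *	else
 *		memcpy(dst, src, len);			// safe fallback
 */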
+#define intel_de_wait_for_set(dev_priv_, reg_, mask_, timeout_) ({ \
+ u32 mask__ = (mask_); \
+ intel_de_wait_for_register((dev_priv_), (reg_), \
+ mask__, mask__, (timeout_)); \
+})
-#define i915_has_memcpy_from_wc() \
- i915_memcpy_from_wc(NULL, NULL, 0)
+#define intel_de_wait_for_clear(dev_priv_, reg_, mask_, timeout_) \
+ intel_de_wait_for_register((dev_priv_), (reg_), (mask_), 0, (timeout_))
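/*
 * Usage sketch for the new display-engine wrappers (register and timeout
 * chosen for illustration, not taken from this patch): poll until a bit
 * becomes set, with the timeout in milliseconds, instead of open-coding
 * intel_wait_for_register() against &dev_priv->uncore.
 *
 *	if (intel_de_wait_for_set(dev_priv, LCPLL_CTL,
 *				  LCPLL_PLL_LOCK, 5))
 *		DRM_ERROR("LCPLL not locked yet\n");
 */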
/* i915_mm.c */
int remap_io_mapping(struct vm_area_struct *vma,
@@ -2830,26 +2472,10 @@ static inline int intel_hws_csb_write_index(struct drm_i915_private *i915)
return I915_HWS_CSB_WRITE_INDEX;
}
-static inline u32 i915_scratch_offset(const struct drm_i915_private *i915)
-{
- return i915_ggtt_offset(i915->gt.scratch);
-}
-
static inline enum i915_map_type
i915_coherent_map_type(struct drm_i915_private *i915)
{
return HAS_LLC(i915) ? I915_MAP_WB : I915_MAP_WC;
}
-static inline void add_taint_for_CI(unsigned int taint)
-{
- /*
- * The system is "ok", just about surviving for the user, but
- * CI results are now unreliable as the HW is very suspect.
- * CI checks the taint state after every test and will reboot
- * the machine if the kernel is tainted.
- */
- add_taint(taint, LOCKDEP_STILL_OK);
-}
-
#endif
diff --git a/drivers/gpu/drm/i915/i915_fixed.h b/drivers/gpu/drm/i915/i915_fixed.h
index 6621595fe74c..a327094de2bd 100644
--- a/drivers/gpu/drm/i915/i915_fixed.h
+++ b/drivers/gpu/drm/i915/i915_fixed.h
@@ -6,6 +6,11 @@
#ifndef _I915_FIXED_H_
#define _I915_FIXED_H_
+#include <linux/bug.h>
+#include <linux/kernel.h>
+#include <linux/math64.h>
+#include <linux/types.h>
+
typedef struct {
u32 val;
} uint_fixed_16_16_t;
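/*
 * Worked example of the 16.16 format (illustrative; not part of this
 * patch): the low 16 bits hold the fraction, so 1.5 is stored as
 * 0x00018000, and a multiply needs a 64-bit intermediate before shifting
 * back down:
 *
 *	u64 tmp = (u64)a.val * b.val;	// 32.32 product
 *	u32 result = tmp >> 16;		// truncate back to 16.16
 */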
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 8a659d3d7435..95e7c52cf8ed 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -29,7 +29,7 @@
#include <drm/i915_drm.h>
#include <linux/dma-fence-array.h>
#include <linux/kthread.h>
-#include <linux/reservation.h>
+#include <linux/dma-resv.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/stop_machine.h>
@@ -46,9 +46,12 @@
#include "gem/i915_gem_ioctls.h"
#include "gem/i915_gem_pm.h"
#include "gem/i915_gemfs.h"
+#include "gt/intel_engine_user.h"
+#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"
#include "gt/intel_mocs.h"
#include "gt/intel_reset.h"
+#include "gt/intel_renderstate.h"
#include "gt/intel_workarounds.h"
#include "i915_drv.h"
@@ -56,7 +59,6 @@
#include "i915_trace.h"
#include "i915_vgpu.h"
-#include "intel_drv.h"
#include "intel_pm.h"
static int
@@ -100,7 +102,8 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
return 0;
}
-int i915_gem_object_unbind(struct drm_i915_gem_object *obj)
+int i915_gem_object_unbind(struct drm_i915_gem_object *obj,
+ unsigned long flags)
{
struct i915_vma *vma;
LIST_HEAD(still_in_list);
@@ -115,7 +118,10 @@ int i915_gem_object_unbind(struct drm_i915_gem_object *obj)
list_move_tail(&vma->obj_link, &still_in_list);
spin_unlock(&obj->vma.lock);
- ret = i915_vma_unbind(vma);
+ ret = -EBUSY;
+ if (flags & I915_GEM_OBJECT_UNBIND_ACTIVE ||
+ !i915_vma_is_active(vma))
+ ret = i915_vma_unbind(vma);
spin_lock(&obj->vma.lock);
}
@@ -133,17 +139,19 @@ i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
void *vaddr = obj->phys_handle->vaddr + args->offset;
char __user *user_data = u64_to_user_ptr(args->data_ptr);
- /* We manually control the domain here and pretend that it
+ /*
+ * We manually control the domain here and pretend that it
* remains coherent i.e. in the GTT domain, like shmem_pwrite.
*/
- intel_fb_obj_invalidate(obj, ORIGIN_CPU);
+ intel_frontbuffer_invalidate(obj->frontbuffer, ORIGIN_CPU);
+
if (copy_from_user(vaddr, user_data, args->size))
return -EFAULT;
drm_clflush_virt_range(vaddr, args->size);
- i915_gem_chipset_flush(to_i915(obj->base.dev));
+ intel_gt_chipset_flush(&to_i915(obj->base.dev)->gt);
- intel_fb_obj_flush(obj, ORIGIN_CPU);
+ intel_frontbuffer_flush(obj->frontbuffer, ORIGIN_CPU);
return 0;
}
@@ -232,46 +240,6 @@ i915_gem_create_ioctl(struct drm_device *dev, void *data,
&args->size, &args->handle);
}
-void i915_gem_flush_ggtt_writes(struct drm_i915_private *dev_priv)
-{
- intel_wakeref_t wakeref;
-
- /*
- * No actual flushing is required for the GTT write domain for reads
- * from the GTT domain. Writes to it "immediately" go to main memory
- * as far as we know, so there's no chipset flush. It also doesn't
- * land in the GPU render cache.
- *
- * However, we do have to enforce the order so that all writes through
- * the GTT land before any writes to the device, such as updates to
- * the GATT itself.
- *
- * We also have to wait a bit for the writes to land from the GTT.
- * An uncached read (i.e. mmio) seems to be ideal for the round-trip
- * timing. This issue has only been observed when switching quickly
- * between GTT writes and CPU reads from inside the kernel on recent hw,
- * and it appears to only affect discrete GTT blocks (i.e. on LLC
- * system agents we cannot reproduce this behaviour, until Cannonlake
- * that was!).
- */
-
- wmb();
-
- if (INTEL_INFO(dev_priv)->has_coherent_ggtt)
- return;
-
- i915_gem_chipset_flush(dev_priv);
-
- with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
- struct intel_uncore *uncore = &dev_priv->uncore;
-
- spin_lock_irq(&uncore->lock);
- intel_uncore_posting_read_fw(uncore,
- RING_HEAD(RENDER_RING_BASE));
- spin_unlock_irq(&uncore->lock);
- }
-}
-
static int
shmem_pread(struct page *page, int offset, int len, char __user *user_data,
bool needs_clflush)
@@ -375,20 +343,16 @@ i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
return ret;
wakeref = intel_runtime_pm_get(&i915->runtime_pm);
- vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
- PIN_MAPPABLE |
- PIN_NONFAULT |
- PIN_NONBLOCK);
+ vma = ERR_PTR(-ENODEV);
+ if (!i915_gem_object_is_tiled(obj))
+ vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
+ PIN_MAPPABLE |
+ PIN_NONBLOCK /* NOWARN */ |
+ PIN_NOEVICT);
if (!IS_ERR(vma)) {
node.start = i915_ggtt_offset(vma);
node.allocated = false;
- ret = i915_vma_put_fence(vma);
- if (ret) {
- i915_vma_unpin(vma);
- vma = ERR_PTR(ret);
- }
- }
- if (IS_ERR(vma)) {
+ } else {
ret = insert_mappable_node(ggtt, &node, PAGE_SIZE);
if (ret)
goto out_unlock;
@@ -430,11 +394,9 @@ i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
unsigned page_length = PAGE_SIZE - page_offset;
page_length = remain < page_length ? remain : page_length;
if (node.allocated) {
- wmb();
ggtt->vm.insert_page(&ggtt->vm,
i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
node.start, I915_CACHE_NONE, 0);
- wmb();
} else {
page_base += offset & PAGE_MASK;
}
@@ -454,7 +416,6 @@ i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
out_unpin:
mutex_lock(&i915->drm.struct_mutex);
if (node.allocated) {
- wmb();
ggtt->vm.clear_range(&ggtt->vm, node.start, node.size);
remove_mappable_node(&node);
} else {
@@ -592,20 +553,16 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
wakeref = intel_runtime_pm_get(rpm);
}
- vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
- PIN_MAPPABLE |
- PIN_NONFAULT |
- PIN_NONBLOCK);
+ vma = ERR_PTR(-ENODEV);
+ if (!i915_gem_object_is_tiled(obj))
+ vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
+ PIN_MAPPABLE |
+ PIN_NONBLOCK /* NOWARN */ |
+ PIN_NOEVICT);
if (!IS_ERR(vma)) {
node.start = i915_ggtt_offset(vma);
node.allocated = false;
- ret = i915_vma_put_fence(vma);
- if (ret) {
- i915_vma_unpin(vma);
- vma = ERR_PTR(ret);
- }
- }
- if (IS_ERR(vma)) {
+ } else {
ret = insert_mappable_node(ggtt, &node, PAGE_SIZE);
if (ret)
goto out_rpm;
@@ -631,7 +588,7 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
goto out_unpin;
}
- intel_fb_obj_invalidate(obj, ORIGIN_CPU);
+ intel_frontbuffer_invalidate(obj->frontbuffer, ORIGIN_CPU);
user_data = u64_to_user_ptr(args->data_ptr);
offset = args->offset;
@@ -648,7 +605,8 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
unsigned int page_length = PAGE_SIZE - page_offset;
page_length = remain < page_length ? remain : page_length;
if (node.allocated) {
- wmb(); /* flush the write before we modify the GGTT */
+ /* flush the write before we modify the GGTT */
+ intel_gt_flush_ggtt_writes(ggtt->vm.gt);
ggtt->vm.insert_page(&ggtt->vm,
i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
node.start, I915_CACHE_NONE, 0);
@@ -672,13 +630,13 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
user_data += page_length;
offset += page_length;
}
- intel_fb_obj_flush(obj, ORIGIN_CPU);
+ intel_frontbuffer_flush(obj->frontbuffer, ORIGIN_CPU);
i915_gem_object_unlock_fence(obj, fence);
out_unpin:
mutex_lock(&i915->drm.struct_mutex);
+ intel_gt_flush_ggtt_writes(ggtt->vm.gt);
if (node.allocated) {
- wmb();
ggtt->vm.clear_range(&ggtt->vm, node.start, node.size);
remove_mappable_node(&node);
} else {
@@ -765,7 +723,7 @@ i915_gem_shmem_pwrite(struct drm_i915_gem_object *obj,
offset = 0;
}
- intel_fb_obj_flush(obj, ORIGIN_CPU);
+ intel_frontbuffer_flush(obj->frontbuffer, ORIGIN_CPU);
i915_gem_object_unlock_fence(obj, fence);
return ret;
@@ -929,35 +887,23 @@ void i915_gem_runtime_suspend(struct drm_i915_private *i915)
}
}
-static int wait_for_engines(struct drm_i915_private *i915)
-{
- if (wait_for(intel_engines_are_idle(i915), I915_IDLE_ENGINES_TIMEOUT)) {
- dev_err(i915->drm.dev,
- "Failed to idle engines, declaring wedged!\n");
- GEM_TRACE_DUMP();
- i915_gem_set_wedged(i915);
- return -EIO;
- }
-
- return 0;
-}
-
static long
wait_for_timelines(struct drm_i915_private *i915,
- unsigned int flags, long timeout)
+ unsigned int wait, long timeout)
{
- struct i915_gt_timelines *gt = &i915->gt.timelines;
- struct i915_timeline *tl;
+ struct intel_gt_timelines *timelines = &i915->gt.timelines;
+ struct intel_timeline *tl;
+ unsigned long flags;
- mutex_lock(&gt->mutex);
- list_for_each_entry(tl, &gt->active_list, link) {
+ spin_lock_irqsave(&timelines->lock, flags);
+ list_for_each_entry(tl, &timelines->active_list, link) {
struct i915_request *rq;
rq = i915_active_request_get_unlocked(&tl->last_request);
if (!rq)
continue;
- mutex_unlock(&gt->mutex);
+ spin_unlock_irqrestore(&timelines->lock, flags);
/*
* "Race-to-idle".
@@ -968,19 +914,19 @@ wait_for_timelines(struct drm_i915_private *i915,
* want to complete as quickly as possible to avoid prolonged
* stalls, so allow the gpu to boost to maximum clocks.
*/
- if (flags & I915_WAIT_FOR_IDLE_BOOST)
+ if (wait & I915_WAIT_FOR_IDLE_BOOST)
gen6_rps_boost(rq);
- timeout = i915_request_wait(rq, flags, timeout);
+ timeout = i915_request_wait(rq, wait, timeout);
i915_request_put(rq);
if (timeout < 0)
return timeout;
/* restart after reacquiring the lock */
- mutex_lock(&gt->mutex);
- tl = list_entry(&gt->active_list, typeof(*tl), link);
+ spin_lock_irqsave(&timelines->lock, flags);
+ tl = list_entry(&timelines->active_list, typeof(*tl), link);
}
- mutex_unlock(&gt->mutex);
+ spin_unlock_irqrestore(&timelines->lock, flags);
return timeout;
}
@@ -988,28 +934,21 @@ wait_for_timelines(struct drm_i915_private *i915,
int i915_gem_wait_for_idle(struct drm_i915_private *i915,
unsigned int flags, long timeout)
{
- GEM_TRACE("flags=%x (%s), timeout=%ld%s, awake?=%s\n",
- flags, flags & I915_WAIT_LOCKED ? "locked" : "unlocked",
- timeout, timeout == MAX_SCHEDULE_TIMEOUT ? " (forever)" : "",
- yesno(i915->gt.awake));
-
/* If the device is asleep, we have no requests outstanding */
- if (!READ_ONCE(i915->gt.awake))
+ if (!intel_gt_pm_is_awake(&i915->gt))
return 0;
+ GEM_TRACE("flags=%x (%s), timeout=%ld%s\n",
+ flags, flags & I915_WAIT_LOCKED ? "locked" : "unlocked",
+ timeout, timeout == MAX_SCHEDULE_TIMEOUT ? " (forever)" : "");
+
timeout = wait_for_timelines(i915, flags, timeout);
if (timeout < 0)
return timeout;
if (flags & I915_WAIT_LOCKED) {
- int err;
-
lockdep_assert_held(&i915->drm.struct_mutex);
- err = wait_for_engines(i915);
- if (err)
- return err;
-
i915_retire_requests(i915);
}
@@ -1088,6 +1027,14 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
return ERR_PTR(ret);
}
+ if (vma->fence && !i915_gem_object_is_tiled(obj)) {
+ mutex_lock(&vma->vm->mutex);
+ ret = i915_vma_revoke_fence(vma);
+ mutex_unlock(&vma->vm->mutex);
+ if (ret)
+ return ERR_PTR(ret);
+ }
+
ret = i915_vma_pin(vma, size, alignment, flags | PIN_GLOBAL);
if (ret)
return ERR_PTR(ret);
@@ -1184,8 +1131,8 @@ void i915_gem_sanitize(struct drm_i915_private *i915)
* back to defaults, recovering from whatever wedged state we left it
* in and so worth trying to use the device once more.
*/
- if (i915_terminally_wedged(i915))
- i915_gem_unset_wedged(i915);
+ if (intel_gt_is_wedged(&i915->gt))
+ intel_gt_unset_wedged(&i915->gt);
/*
* If we inherit context state from the BIOS or earlier occupants
@@ -1195,82 +1142,72 @@ void i915_gem_sanitize(struct drm_i915_private *i915)
* it may impact the display and we are uncertain about the stability
* of the reset, so this could be applied to even earlier gen.
*/
- intel_gt_sanitize(i915, false);
+ intel_gt_sanitize(&i915->gt, false);
intel_uncore_forcewake_put(&i915->uncore, FORCEWAKE_ALL);
intel_runtime_pm_put(&i915->runtime_pm, wakeref);
}
-void i915_gem_init_swizzling(struct drm_i915_private *dev_priv)
+static void init_unused_ring(struct intel_gt *gt, u32 base)
{
- if (INTEL_GEN(dev_priv) < 5 ||
- dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
- return;
-
- I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
- DISP_TILE_SURFACE_SWIZZLING);
-
- if (IS_GEN(dev_priv, 5))
- return;
-
- I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL);
- if (IS_GEN(dev_priv, 6))
- I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
- else if (IS_GEN(dev_priv, 7))
- I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
- else if (IS_GEN(dev_priv, 8))
- I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_BDW));
- else
- BUG();
-}
+ struct intel_uncore *uncore = gt->uncore;
-static void init_unused_ring(struct drm_i915_private *dev_priv, u32 base)
-{
- I915_WRITE(RING_CTL(base), 0);
- I915_WRITE(RING_HEAD(base), 0);
- I915_WRITE(RING_TAIL(base), 0);
- I915_WRITE(RING_START(base), 0);
+ intel_uncore_write(uncore, RING_CTL(base), 0);
+ intel_uncore_write(uncore, RING_HEAD(base), 0);
+ intel_uncore_write(uncore, RING_TAIL(base), 0);
+ intel_uncore_write(uncore, RING_START(base), 0);
}
-static void init_unused_rings(struct drm_i915_private *dev_priv)
+static void init_unused_rings(struct intel_gt *gt)
{
- if (IS_I830(dev_priv)) {
- init_unused_ring(dev_priv, PRB1_BASE);
- init_unused_ring(dev_priv, SRB0_BASE);
- init_unused_ring(dev_priv, SRB1_BASE);
- init_unused_ring(dev_priv, SRB2_BASE);
- init_unused_ring(dev_priv, SRB3_BASE);
- } else if (IS_GEN(dev_priv, 2)) {
- init_unused_ring(dev_priv, SRB0_BASE);
- init_unused_ring(dev_priv, SRB1_BASE);
- } else if (IS_GEN(dev_priv, 3)) {
- init_unused_ring(dev_priv, PRB1_BASE);
- init_unused_ring(dev_priv, PRB2_BASE);
+ struct drm_i915_private *i915 = gt->i915;
+
+ if (IS_I830(i915)) {
+ init_unused_ring(gt, PRB1_BASE);
+ init_unused_ring(gt, SRB0_BASE);
+ init_unused_ring(gt, SRB1_BASE);
+ init_unused_ring(gt, SRB2_BASE);
+ init_unused_ring(gt, SRB3_BASE);
+ } else if (IS_GEN(i915, 2)) {
+ init_unused_ring(gt, SRB0_BASE);
+ init_unused_ring(gt, SRB1_BASE);
+ } else if (IS_GEN(i915, 3)) {
+ init_unused_ring(gt, PRB1_BASE);
+ init_unused_ring(gt, PRB2_BASE);
}
}
-int i915_gem_init_hw(struct drm_i915_private *dev_priv)
+int i915_gem_init_hw(struct drm_i915_private *i915)
{
+ struct intel_uncore *uncore = &i915->uncore;
+ struct intel_gt *gt = &i915->gt;
int ret;
- dev_priv->gt.last_init_time = ktime_get();
+ BUG_ON(!i915->kernel_context);
+ ret = intel_gt_terminally_wedged(gt);
+ if (ret)
+ return ret;
+
+ gt->last_init_time = ktime_get();
/* Double layer security blanket, see i915_gem_init() */
- intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
+ intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
- if (HAS_EDRAM(dev_priv) && INTEL_GEN(dev_priv) < 9)
- I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf));
+ if (HAS_EDRAM(i915) && INTEL_GEN(i915) < 9)
+ intel_uncore_rmw(uncore, HSW_IDICR, 0, IDIHASHMSK(0xf));
- if (IS_HASWELL(dev_priv))
- I915_WRITE(MI_PREDICATE_RESULT_2, IS_HSW_GT3(dev_priv) ?
- LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED);
+ if (IS_HASWELL(i915))
+ intel_uncore_write(uncore,
+ MI_PREDICATE_RESULT_2,
+ IS_HSW_GT3(i915) ?
+ LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED);
/* Apply the GT workarounds... */
- intel_gt_apply_workarounds(dev_priv);
+ intel_gt_apply_workarounds(gt);
/* ...and determine whether they are sticking. */
- intel_gt_verify_workarounds(dev_priv, "init");
+ intel_gt_verify_workarounds(gt, "init");
- i915_gem_init_swizzling(dev_priv);
+ intel_gt_init_swizzling(gt);
/*
* At least 830 can leave some of the unused rings
@@ -1278,49 +1215,32 @@ int i915_gem_init_hw(struct drm_i915_private *dev_priv)
 * will prevent c3 entry. Make sure all unused rings
* are totally idle.
*/
- init_unused_rings(dev_priv);
+ init_unused_rings(gt);
- BUG_ON(!dev_priv->kernel_context);
- ret = i915_terminally_wedged(dev_priv);
- if (ret)
- goto out;
-
- ret = i915_ppgtt_init_hw(dev_priv);
+ ret = i915_ppgtt_init_hw(gt);
if (ret) {
DRM_ERROR("Enabling PPGTT failed (%d)\n", ret);
goto out;
}
- ret = intel_wopcm_init_hw(&dev_priv->wopcm);
- if (ret) {
- DRM_ERROR("Enabling WOPCM failed (%d)\n", ret);
- goto out;
- }
-
/* We can't enable contexts until all firmware is loaded */
- ret = intel_uc_init_hw(dev_priv);
+ ret = intel_uc_init_hw(&gt->uc);
if (ret) {
- DRM_ERROR("Enabling uc failed (%d)\n", ret);
+ i915_probe_error(i915, "Enabling uc failed (%d)\n", ret);
goto out;
}
- intel_mocs_init_l3cc_table(dev_priv);
-
- intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
-
- intel_engines_set_scheduler_caps(dev_priv);
- return 0;
+ intel_mocs_init(gt);
out:
- intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
+ intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
return ret;
}
static int __intel_engines_record_defaults(struct drm_i915_private *i915)
{
+ struct i915_request *requests[I915_NUM_ENGINES] = {};
struct intel_engine_cs *engine;
- struct i915_gem_context *ctx;
- struct i915_gem_engines *e;
enum intel_engine_id id;
int err = 0;
@@ -1333,46 +1253,72 @@ static int __intel_engines_record_defaults(struct drm_i915_private *i915)
* from the same default HW values.
*/
- ctx = i915_gem_context_create_kernel(i915, 0);
- if (IS_ERR(ctx))
- return PTR_ERR(ctx);
-
- e = i915_gem_context_lock_engines(ctx);
-
for_each_engine(engine, i915, id) {
- struct intel_context *ce = e->engines[id];
+ struct intel_context *ce;
struct i915_request *rq;
+ /* We must be able to switch to something! */
+ GEM_BUG_ON(!engine->kernel_context);
+ engine->serial++; /* force the kernel context switch */
+
+ ce = intel_context_create(i915->kernel_context, engine);
+ if (IS_ERR(ce)) {
+ err = PTR_ERR(ce);
+ goto out;
+ }
+
rq = intel_context_create_request(ce);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
- goto err_active;
+ intel_context_put(ce);
+ goto out;
}
- err = 0;
- if (rq->engine->init_context)
- err = rq->engine->init_context(rq);
+ err = intel_engine_emit_ctx_wa(rq);
+ if (err)
+ goto err_rq;
+
+ /*
+	 * Failing to program the MOCS is non-fatal. The system will not
+ * run at peak performance. So warn the user and carry on.
+ */
+ err = intel_mocs_emit(rq);
+ if (err)
+ dev_notice(i915->drm.dev,
+ "Failed to program MOCS registers; expect performance issues.\n");
+ err = intel_renderstate_emit(rq);
+ if (err)
+ goto err_rq;
+
+err_rq:
+ requests[id] = i915_request_get(rq);
i915_request_add(rq);
if (err)
- goto err_active;
+ goto out;
}
/* Flush the default context image to memory, and enable powersaving. */
if (!i915_gem_load_power_context(i915)) {
err = -EIO;
- goto err_active;
+ goto out;
}
- for_each_engine(engine, i915, id) {
- struct intel_context *ce = e->engines[id];
- struct i915_vma *state = ce->state;
+ for (id = 0; id < ARRAY_SIZE(requests); id++) {
+ struct i915_request *rq;
+ struct i915_vma *state;
void *vaddr;
- if (!state)
+ rq = requests[id];
+ if (!rq)
continue;
- GEM_BUG_ON(intel_context_is_pinned(ce));
+ /* We want to be able to unbind the state from the GGTT */
+ GEM_BUG_ON(intel_context_is_pinned(rq->hw_context));
+
+ state = rq->hw_context->state;
+ if (!state)
+ continue;
/*
* As we will hold a reference to the logical state, it will
@@ -1384,99 +1330,60 @@ static int __intel_engines_record_defaults(struct drm_i915_private *i915)
*/
err = i915_vma_unbind(state);
if (err)
- goto err_active;
+ goto out;
i915_gem_object_lock(state->obj);
err = i915_gem_object_set_to_cpu_domain(state->obj, false);
i915_gem_object_unlock(state->obj);
if (err)
- goto err_active;
+ goto out;
- engine->default_state = i915_gem_object_get(state->obj);
- i915_gem_object_set_cache_coherency(engine->default_state,
- I915_CACHE_LLC);
+ i915_gem_object_set_cache_coherency(state->obj, I915_CACHE_LLC);
/* Check we can acquire the image of the context state */
- vaddr = i915_gem_object_pin_map(engine->default_state,
- I915_MAP_FORCE_WB);
+ vaddr = i915_gem_object_pin_map(state->obj, I915_MAP_FORCE_WB);
if (IS_ERR(vaddr)) {
err = PTR_ERR(vaddr);
- goto err_active;
+ goto out;
}
- i915_gem_object_unpin_map(engine->default_state);
+ rq->engine->default_state = i915_gem_object_get(state->obj);
+ i915_gem_object_unpin_map(state->obj);
}
- if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) {
- unsigned int found = intel_engines_has_context_isolation(i915);
-
- /*
- * Make sure that classes with multiple engine instances all
- * share the same basic configuration.
- */
- for_each_engine(engine, i915, id) {
- unsigned int bit = BIT(engine->uabi_class);
- unsigned int expected = engine->default_state ? bit : 0;
-
- if ((found & bit) != expected) {
- DRM_ERROR("mismatching default context state for class %d on engine %s\n",
- engine->uabi_class, engine->name);
- }
- }
- }
-
-out_ctx:
- i915_gem_context_unlock_engines(ctx);
- i915_gem_context_set_closed(ctx);
- i915_gem_context_put(ctx);
- return err;
-
-err_active:
+out:
/*
* If we have to abandon now, we expect the engines to be idle
 * and ready to be torn down. The quickest way we can accomplish
* this is by declaring ourselves wedged.
*/
- i915_gem_set_wedged(i915);
- goto out_ctx;
-}
+ if (err)
+ intel_gt_set_wedged(&i915->gt);
-static int
-i915_gem_init_scratch(struct drm_i915_private *i915, unsigned int size)
-{
- struct drm_i915_gem_object *obj;
- struct i915_vma *vma;
- int ret;
+ for (id = 0; id < ARRAY_SIZE(requests); id++) {
+ struct intel_context *ce;
+ struct i915_request *rq;
- obj = i915_gem_object_create_stolen(i915, size);
- if (!obj)
- obj = i915_gem_object_create_internal(i915, size);
- if (IS_ERR(obj)) {
- DRM_ERROR("Failed to allocate scratch page\n");
- return PTR_ERR(obj);
- }
+ rq = requests[id];
+ if (!rq)
+ continue;
- vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
- if (IS_ERR(vma)) {
- ret = PTR_ERR(vma);
- goto err_unref;
+ ce = rq->hw_context;
+ i915_request_put(rq);
+ intel_context_put(ce);
}
+ return err;
+}
- ret = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
- if (ret)
- goto err_unref;
-
- i915->gt.scratch = vma;
- return 0;
-
-err_unref:
- i915_gem_object_put(obj);
- return ret;
+static int
+i915_gem_init_scratch(struct drm_i915_private *i915, unsigned int size)
+{
+ return intel_gt_init_scratch(&i915->gt, size);
}
static void i915_gem_fini_scratch(struct drm_i915_private *i915)
{
- i915_vma_unpin_and_release(&i915->gt.scratch, 0);
+ intel_gt_fini_scratch(&i915->gt);
}
static int intel_engines_verify_workarounds(struct drm_i915_private *i915)
@@ -1505,21 +1412,14 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
mkwrite_device_info(dev_priv)->page_sizes =
I915_GTT_PAGE_SIZE_4K;
- dev_priv->mm.unordered_timeline = dma_fence_context_alloc(1);
-
- i915_timelines_init(dev_priv);
+ intel_timelines_init(dev_priv);
ret = i915_gem_init_userptr(dev_priv);
if (ret)
return ret;
- ret = intel_uc_init_misc(dev_priv);
- if (ret)
- return ret;
-
- ret = intel_wopcm_init(&dev_priv->wopcm);
- if (ret)
- goto err_uc_misc;
+ intel_uc_fetch_firmwares(&dev_priv->gt.uc);
+ intel_wopcm_init(&dev_priv->wopcm);
/* This is just a security blanket to placate dragons.
* On some systems, we very sporadically observe that the first TLBs
@@ -1530,7 +1430,7 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
mutex_lock(&dev_priv->drm.struct_mutex);
intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
- ret = i915_gem_init_ggtt(dev_priv);
+ ret = i915_init_ggtt(dev_priv);
if (ret) {
GEM_BUG_ON(ret == -EIO);
goto err_unlock;
@@ -1563,16 +1463,14 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
intel_init_gt_powersave(dev_priv);
- ret = intel_uc_init(dev_priv);
- if (ret)
- goto err_pm;
+ intel_uc_init(&dev_priv->gt.uc);
ret = i915_gem_init_hw(dev_priv);
if (ret)
goto err_uc_init;
/* Only when the HW is re-initialised, can we replay the requests */
- ret = intel_gt_resume(dev_priv);
+ ret = intel_gt_resume(&dev_priv->gt);
if (ret)
goto err_init_hw;
@@ -1595,15 +1493,13 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
if (ret)
goto err_gt;
- if (i915_inject_load_failure()) {
- ret = -ENODEV;
+ ret = i915_inject_load_error(dev_priv, -ENODEV);
+ if (ret)
goto err_gt;
- }
- if (i915_inject_load_failure()) {
- ret = -EIO;
+ ret = i915_inject_load_error(dev_priv, -EIO);
+ if (ret)
goto err_gt;
- }
intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
mutex_unlock(&dev_priv->drm.struct_mutex);
@@ -1619,7 +1515,7 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
err_gt:
mutex_unlock(&dev_priv->drm.struct_mutex);
- i915_gem_set_wedged(dev_priv);
+ intel_gt_set_wedged(&dev_priv->gt);
i915_gem_suspend(dev_priv);
i915_gem_suspend_late(dev_priv);
@@ -1627,11 +1523,10 @@ err_gt:
mutex_lock(&dev_priv->drm.struct_mutex);
err_init_hw:
- intel_uc_fini_hw(dev_priv);
+ intel_uc_fini_hw(&dev_priv->gt.uc);
err_uc_init:
- intel_uc_fini(dev_priv);
-err_pm:
if (ret != -EIO) {
+ intel_uc_fini(&dev_priv->gt.uc);
intel_cleanup_gt_powersave(dev_priv);
intel_engines_cleanup(dev_priv);
}
@@ -1645,26 +1540,24 @@ err_unlock:
intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
mutex_unlock(&dev_priv->drm.struct_mutex);
-err_uc_misc:
- intel_uc_fini_misc(dev_priv);
-
if (ret != -EIO) {
+ intel_uc_cleanup_firmwares(&dev_priv->gt.uc);
i915_gem_cleanup_userptr(dev_priv);
- i915_timelines_fini(dev_priv);
+ intel_timelines_fini(dev_priv);
}
if (ret == -EIO) {
mutex_lock(&dev_priv->drm.struct_mutex);
/*
- * Allow engine initialisation to fail by marking the GPU as
- * wedged. But we only want to do this where the GPU is angry,
+ * Allow engines or uC initialisation to fail by marking the GPU
+ * as wedged. But we only want to do this when the GPU is angry,
* for all other failure, such as an allocation failure, bail.
*/
- if (!i915_reset_failed(dev_priv)) {
- i915_load_error(dev_priv,
- "Failed to initialize GPU, declaring it wedged!\n");
- i915_gem_set_wedged(dev_priv);
+ if (!intel_gt_is_wedged(&dev_priv->gt)) {
+ i915_probe_error(dev_priv,
+ "Failed to initialize GPU, declaring it wedged!\n");
+ intel_gt_set_wedged(&dev_priv->gt);
}
/* Minimal basic recovery for KMS */
@@ -1680,7 +1573,19 @@ err_uc_misc:
return ret;
}
-void i915_gem_fini_hw(struct drm_i915_private *dev_priv)
+void i915_gem_driver_register(struct drm_i915_private *i915)
+{
+ i915_gem_driver_register__shrinker(i915);
+
+ intel_engines_driver_register(i915);
+}
+
+void i915_gem_driver_unregister(struct drm_i915_private *i915)
+{
+ i915_gem_driver_unregister__shrinker(i915);
+}
+
+void i915_gem_driver_remove(struct drm_i915_private *dev_priv)
{
GEM_BUG_ON(dev_priv->gt.awake);
@@ -1693,14 +1598,14 @@ void i915_gem_fini_hw(struct drm_i915_private *dev_priv)
i915_gem_drain_workqueue(dev_priv);
mutex_lock(&dev_priv->drm.struct_mutex);
- intel_uc_fini_hw(dev_priv);
- intel_uc_fini(dev_priv);
+ intel_uc_fini_hw(&dev_priv->gt.uc);
+ intel_uc_fini(&dev_priv->gt.uc);
mutex_unlock(&dev_priv->drm.struct_mutex);
i915_gem_drain_freed_objects(dev_priv);
}
-void i915_gem_fini(struct drm_i915_private *dev_priv)
+void i915_gem_driver_release(struct drm_i915_private *dev_priv)
{
mutex_lock(&dev_priv->drm.struct_mutex);
intel_engines_cleanup(dev_priv);
@@ -1712,9 +1617,9 @@ void i915_gem_fini(struct drm_i915_private *dev_priv)
intel_cleanup_gt_powersave(dev_priv);
- intel_uc_fini_misc(dev_priv);
+ intel_uc_cleanup_firmwares(&dev_priv->gt.uc);
i915_gem_cleanup_userptr(dev_priv);
- i915_timelines_fini(dev_priv);
+ intel_timelines_fini(dev_priv);
i915_gem_drain_freed_objects(dev_priv);
@@ -1729,7 +1634,6 @@ void i915_gem_init_mmio(struct drm_i915_private *i915)
static void i915_gem_init__mm(struct drm_i915_private *i915)
{
spin_lock_init(&i915->mm.obj_lock);
- spin_lock_init(&i915->mm.free_lock);
init_llist_head(&i915->mm.free_list);
@@ -1743,22 +1647,9 @@ int i915_gem_init_early(struct drm_i915_private *dev_priv)
{
int err;
- intel_gt_pm_init(dev_priv);
-
- INIT_LIST_HEAD(&dev_priv->gt.active_rings);
- INIT_LIST_HEAD(&dev_priv->gt.closed_vma);
- spin_lock_init(&dev_priv->gt.closed_lock);
-
i915_gem_init__mm(dev_priv);
i915_gem_init__pm(dev_priv);
- init_waitqueue_head(&dev_priv->gpu_error.wait_queue);
- init_waitqueue_head(&dev_priv->gpu_error.reset_queue);
- mutex_init(&dev_priv->gpu_error.wedge_mutex);
- init_srcu_struct(&dev_priv->gpu_error.reset_backoff_srcu);
-
- atomic_set(&dev_priv->mm.bsd_engine_dispatch_index, 0);
-
spin_lock_init(&dev_priv->fb_tracking.lock);
err = i915_gemfs_init(dev_priv);
@@ -1775,8 +1666,6 @@ void i915_gem_cleanup_early(struct drm_i915_private *dev_priv)
GEM_BUG_ON(atomic_read(&dev_priv->mm.free_count));
WARN_ON(dev_priv->mm.shrink_count);
- cleanup_srcu_struct(&dev_priv->gpu_error.reset_backoff_srcu);
-
i915_gemfs_fini(dev_priv);
}
@@ -1869,39 +1758,6 @@ int i915_gem_open(struct drm_i915_private *i915, struct drm_file *file)
return ret;
}
-/**
- * i915_gem_track_fb - update frontbuffer tracking
- * @old: current GEM buffer for the frontbuffer slots
- * @new: new GEM buffer for the frontbuffer slots
- * @frontbuffer_bits: bitmask of frontbuffer slots
- *
- * This updates the frontbuffer tracking bits @frontbuffer_bits by clearing them
- * from @old and setting them in @new. Both @old and @new can be NULL.
- */
-void i915_gem_track_fb(struct drm_i915_gem_object *old,
- struct drm_i915_gem_object *new,
- unsigned frontbuffer_bits)
-{
- /* Control of individual bits within the mask is guarded by
- * the owning plane->mutex, i.e. we can never see concurrent
- * manipulation of individual bits. But since the bitfield as a whole
- * is updated using RMW, we need to use atomics in order to update
- * the bits.
- */
- BUILD_BUG_ON(INTEL_FRONTBUFFER_BITS_PER_PIPE * I915_MAX_PIPES >
- BITS_PER_TYPE(atomic_t));
-
- if (old) {
- WARN_ON(!(atomic_read(&old->frontbuffer_bits) & frontbuffer_bits));
- atomic_andnot(frontbuffer_bits, &old->frontbuffer_bits);
- }
-
- if (new) {
- WARN_ON(atomic_read(&new->frontbuffer_bits) & frontbuffer_bits);
- atomic_or(frontbuffer_bits, &new->frontbuffer_bits);
- }
-}
-
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_gem_device.c"
#include "selftests/i915_gem.c"
diff --git a/drivers/gpu/drm/i915/i915_gem.h b/drivers/gpu/drm/i915/i915_gem.h
index fe82d3571072..167a7b56ed5b 100644
--- a/drivers/gpu/drm/i915/i915_gem.h
+++ b/drivers/gpu/drm/i915/i915_gem.h
@@ -28,6 +28,8 @@
#include <linux/bug.h>
#include <linux/interrupt.h>
+#include <drm/drm_drv.h>
+
struct drm_i915_private;
#ifdef CONFIG_DRM_I915_DEBUG_GEM
diff --git a/drivers/gpu/drm/i915/i915_gem_batch_pool.c b/drivers/gpu/drm/i915/i915_gem_batch_pool.c
deleted file mode 100644
index 25a3e4d09a2f..000000000000
--- a/drivers/gpu/drm/i915/i915_gem_batch_pool.c
+++ /dev/null
@@ -1,140 +0,0 @@
-/*
- * SPDX-License-Identifier: MIT
- *
- * Copyright © 2014-2018 Intel Corporation
- */
-
-#include "i915_gem_batch_pool.h"
-#include "i915_drv.h"
-
-/**
- * DOC: batch pool
- *
- * In order to submit batch buffers as 'secure', the software command parser
- * must ensure that a batch buffer cannot be modified after parsing. It does
- * this by copying the user provided batch buffer contents to a kernel owned
- * buffer from which the hardware will actually execute, and by carefully
- * managing the address space bindings for such buffers.
- *
- * The batch pool framework provides a mechanism for the driver to manage a
- * set of scratch buffers to use for this purpose. The framework can be
- * extended to support other use cases should they arise.
- */
-
-/**
- * i915_gem_batch_pool_init() - initialize a batch buffer pool
- * @pool: the batch buffer pool
- * @engine: the associated request submission engine
- */
-void i915_gem_batch_pool_init(struct i915_gem_batch_pool *pool,
- struct intel_engine_cs *engine)
-{
- int n;
-
- pool->engine = engine;
-
- for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++)
- INIT_LIST_HEAD(&pool->cache_list[n]);
-}
-
-/**
- * i915_gem_batch_pool_fini() - clean up a batch buffer pool
- * @pool: the pool to clean up
- *
- * Note: Callers must hold the struct_mutex.
- */
-void i915_gem_batch_pool_fini(struct i915_gem_batch_pool *pool)
-{
- int n;
-
- lockdep_assert_held(&pool->engine->i915->drm.struct_mutex);
-
- for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++) {
- struct drm_i915_gem_object *obj, *next;
-
- list_for_each_entry_safe(obj, next,
- &pool->cache_list[n],
- batch_pool_link)
- i915_gem_object_put(obj);
-
- INIT_LIST_HEAD(&pool->cache_list[n]);
- }
-}
-
-/**
- * i915_gem_batch_pool_get() - allocate a buffer from the pool
- * @pool: the batch buffer pool
- * @size: the minimum desired size of the returned buffer
- *
- * Returns an inactive buffer from @pool with at least @size bytes,
- * with the pages pinned. The caller must call i915_gem_object_unpin_pages()
- * on the returned object.
- *
- * Note: Callers must hold the struct_mutex
- *
- * Return: the buffer object or an error pointer
- */
-struct drm_i915_gem_object *
-i915_gem_batch_pool_get(struct i915_gem_batch_pool *pool,
- size_t size)
-{
- struct drm_i915_gem_object *obj;
- struct list_head *list;
- int n, ret;
-
- lockdep_assert_held(&pool->engine->i915->drm.struct_mutex);
-
- /* Compute a power-of-two bucket, but throw everything greater than
- * 16KiB into the same bucket: i.e. the buckets hold objects of
- * (1 page, 2 pages, 4 pages, 8+ pages).
- */
- n = fls(size >> PAGE_SHIFT) - 1;
- if (n >= ARRAY_SIZE(pool->cache_list))
- n = ARRAY_SIZE(pool->cache_list) - 1;
- list = &pool->cache_list[n];
-
- list_for_each_entry(obj, list, batch_pool_link) {
- /* The batches are strictly LRU ordered */
- if (i915_gem_object_is_active(obj)) {
- struct reservation_object *resv = obj->base.resv;
-
- if (!reservation_object_test_signaled_rcu(resv, true))
- break;
-
- i915_retire_requests(pool->engine->i915);
- GEM_BUG_ON(i915_gem_object_is_active(obj));
-
- /*
- * The object is now idle, clear the array of shared
- * fences before we add a new request. Although we
- * remain on the same engine, we may be on a different
- * timeline and so may continually grow the array,
- * trapping a reference to all the old fences, rather
- * than replace the existing fence.
- */
- if (rcu_access_pointer(resv->fence)) {
- reservation_object_lock(resv, NULL);
- reservation_object_add_excl_fence(resv, NULL);
- reservation_object_unlock(resv);
- }
- }
-
- GEM_BUG_ON(!reservation_object_test_signaled_rcu(obj->base.resv,
- true));
-
- if (obj->base.size >= size)
- goto found;
- }
-
- obj = i915_gem_object_create_internal(pool->engine->i915, size);
- if (IS_ERR(obj))
- return obj;
-
-found:
- ret = i915_gem_object_pin_pages(obj);
- if (ret)
- return ERR_PTR(ret);
-
- list_move_tail(&obj->batch_pool_link, list);
- return obj;
-}
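/*
 * For reference, the bucket selection the removed pool used (matching the
 * comment above): a power-of-two bucket chosen by page count, with
 * everything of eight or more pages sharing the last bucket:
 *
 *	n = fls(size >> PAGE_SHIFT) - 1;	// log2 of the page count
 *	if (n >= ARRAY_SIZE(pool->cache_list))	// clamp 8+ pages together
 *		n = ARRAY_SIZE(pool->cache_list) - 1;
 *
 * so 4KiB lands in bucket 0, 8KiB in 1, 16KiB in 2 and 32KiB+ in 3.
 */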
diff --git a/drivers/gpu/drm/i915/i915_gem_batch_pool.h b/drivers/gpu/drm/i915/i915_gem_batch_pool.h
deleted file mode 100644
index feeeeeaa54d8..000000000000
--- a/drivers/gpu/drm/i915/i915_gem_batch_pool.h
+++ /dev/null
@@ -1,26 +0,0 @@
-/*
- * SPDX-License-Identifier: MIT
- *
- * Copyright © 2014-2018 Intel Corporation
- */
-
-#ifndef I915_GEM_BATCH_POOL_H
-#define I915_GEM_BATCH_POOL_H
-
-#include <linux/types.h>
-
-struct drm_i915_gem_object;
-struct intel_engine_cs;
-
-struct i915_gem_batch_pool {
- struct intel_engine_cs *engine;
- struct list_head cache_list[4];
-};
-
-void i915_gem_batch_pool_init(struct i915_gem_batch_pool *pool,
- struct intel_engine_cs *engine);
-void i915_gem_batch_pool_fini(struct i915_gem_batch_pool *pool);
-struct drm_i915_gem_object *
-i915_gem_batch_pool_get(struct i915_gem_batch_pool *pool, size_t size);
-
-#endif /* I915_GEM_BATCH_POOL_H */
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index a5783c4cb98b..52c86c6e0673 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -31,7 +31,6 @@
#include "gem/i915_gem_context.h"
#include "i915_drv.h"
-#include "intel_drv.h"
#include "i915_trace.h"
I915_SELFTEST_DECLARE(static struct igt_evict_ctl {
@@ -62,9 +61,6 @@ mark_free(struct drm_mm_scan *scan,
if (i915_vma_is_pinned(vma))
return false;
- if (flags & PIN_NONFAULT && i915_vma_has_userfault(vma))
- return false;
-
list_add(&vma->evict_link, unwind);
return drm_mm_scan_add_block(scan, &vma->node);
}
@@ -331,11 +327,6 @@ int i915_gem_evict_for_node(struct i915_address_space *vm,
break;
}
- if (flags & PIN_NONFAULT && i915_vma_has_userfault(vma)) {
- ret = -ENOSPC;
- break;
- }
-
/* Overlap of objects in the same batch? */
if (i915_vma_is_pinned(vma)) {
ret = -ENOSPC;
diff --git a/drivers/gpu/drm/i915/i915_gem_fence_reg.c b/drivers/gpu/drm/i915/i915_gem_fence_reg.c
index 0bf53ac1c835..615a9f4ef30c 100644
--- a/drivers/gpu/drm/i915/i915_gem_fence_reg.c
+++ b/drivers/gpu/drm/i915/i915_gem_fence_reg.c
@@ -230,16 +230,14 @@ static int fence_update(struct i915_fence_reg *fence,
i915_gem_object_get_tiling(vma->obj)))
return -EINVAL;
- ret = i915_active_request_retire(&vma->last_fence,
- &vma->obj->base.dev->struct_mutex);
+ ret = i915_active_wait(&vma->active);
if (ret)
return ret;
}
old = xchg(&fence->vma, NULL);
if (old) {
- ret = i915_active_request_retire(&old->last_fence,
- &old->obj->base.dev->struct_mutex);
+ ret = i915_active_wait(&old->active);
if (ret) {
fence->vma = old;
return ret;
@@ -289,7 +287,7 @@ static int fence_update(struct i915_fence_reg *fence,
}
/**
- * i915_vma_put_fence - force-remove fence for a VMA
+ * i915_vma_revoke_fence - force-remove fence for a VMA
* @vma: vma to map linearly (not through a fence reg)
*
* This function force-removes any fence from the given object, which is useful
@@ -299,14 +297,15 @@ static int fence_update(struct i915_fence_reg *fence,
*
* 0 on success, negative error code on failure.
*/
-int i915_vma_put_fence(struct i915_vma *vma)
+int i915_vma_revoke_fence(struct i915_vma *vma)
{
struct i915_fence_reg *fence = vma->fence;
+ lockdep_assert_held(&vma->vm->mutex);
if (!fence)
return 0;
- if (fence->pin_count)
+ if (atomic_read(&fence->pin_count))
return -EBUSY;
return fence_update(fence, NULL);
@@ -319,7 +318,7 @@ static struct i915_fence_reg *fence_find(struct drm_i915_private *i915)
list_for_each_entry(fence, &i915->ggtt.fence_list, link) {
GEM_BUG_ON(fence->vma && fence->vma->fence != fence);
- if (fence->pin_count)
+ if (atomic_read(&fence->pin_count))
continue;
return fence;
@@ -332,6 +331,48 @@ static struct i915_fence_reg *fence_find(struct drm_i915_private *i915)
return ERR_PTR(-EDEADLK);
}
+static int __i915_vma_pin_fence(struct i915_vma *vma)
+{
+ struct i915_ggtt *ggtt = i915_vm_to_ggtt(vma->vm);
+ struct i915_fence_reg *fence;
+ struct i915_vma *set = i915_gem_object_is_tiled(vma->obj) ? vma : NULL;
+ int err;
+
+ /* Just update our place in the LRU if our fence is getting reused. */
+ if (vma->fence) {
+ fence = vma->fence;
+ GEM_BUG_ON(fence->vma != vma);
+ atomic_inc(&fence->pin_count);
+ if (!fence->dirty) {
+ list_move_tail(&fence->link, &ggtt->fence_list);
+ return 0;
+ }
+ } else if (set) {
+ fence = fence_find(vma->vm->i915);
+ if (IS_ERR(fence))
+ return PTR_ERR(fence);
+
+ GEM_BUG_ON(atomic_read(&fence->pin_count));
+ atomic_inc(&fence->pin_count);
+ } else {
+ return 0;
+ }
+
+ err = fence_update(fence, set);
+ if (err)
+ goto out_unpin;
+
+ GEM_BUG_ON(fence->vma != set);
+ GEM_BUG_ON(vma->fence != (set ? fence : NULL));
+
+ if (set)
+ return 0;
+
+out_unpin:
+ atomic_dec(&fence->pin_count);
+ return err;
+}
+
/**
* i915_vma_pin_fence - set up fencing for a vma
* @vma: vma to map through a fence reg
@@ -352,8 +393,6 @@ static struct i915_fence_reg *fence_find(struct drm_i915_private *i915)
*/
int i915_vma_pin_fence(struct i915_vma *vma)
{
- struct i915_fence_reg *fence;
- struct i915_vma *set = i915_gem_object_is_tiled(vma->obj) ? vma : NULL;
int err;
/*
@@ -361,39 +400,16 @@ int i915_vma_pin_fence(struct i915_vma *vma)
* must keep the device awake whilst using the fence.
*/
assert_rpm_wakelock_held(&vma->vm->i915->runtime_pm);
+ GEM_BUG_ON(!i915_vma_is_pinned(vma));
+ GEM_BUG_ON(!i915_vma_is_ggtt(vma));
- /* Just update our place in the LRU if our fence is getting reused. */
- if (vma->fence) {
- fence = vma->fence;
- GEM_BUG_ON(fence->vma != vma);
- fence->pin_count++;
- if (!fence->dirty) {
- list_move_tail(&fence->link,
- &fence->i915->ggtt.fence_list);
- return 0;
- }
- } else if (set) {
- fence = fence_find(vma->vm->i915);
- if (IS_ERR(fence))
- return PTR_ERR(fence);
-
- GEM_BUG_ON(fence->pin_count);
- fence->pin_count++;
- } else
- return 0;
-
- err = fence_update(fence, set);
+ err = mutex_lock_interruptible(&vma->vm->mutex);
if (err)
- goto out_unpin;
+ return err;
- GEM_BUG_ON(fence->vma != set);
- GEM_BUG_ON(vma->fence != (set ? fence : NULL));
+ err = __i915_vma_pin_fence(vma);
+ mutex_unlock(&vma->vm->mutex);
- if (set)
- return 0;
-
-out_unpin:
- fence->pin_count--;
return err;
}
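/*
 * Caller-side sketch under the new locking (illustrative, not from this
 * patch): the vma must already be pinned in the GGTT, and the fence is
 * now claimed under vma->vm->mutex inside i915_vma_pin_fence() rather
 * than under struct_mutex.
 *
 *	ret = i915_vma_pin_fence(vma);
 *	if (ret)
 *		goto err_unpin;
 *	if (vma->fence)
 *		... access through the fenced, detiled GGTT range ...
 *	i915_vma_unpin_fence(vma);
 */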
@@ -406,16 +422,17 @@ out_unpin:
*/
struct i915_fence_reg *i915_reserve_fence(struct drm_i915_private *i915)
{
+ struct i915_ggtt *ggtt = &i915->ggtt;
struct i915_fence_reg *fence;
int count;
int ret;
- lockdep_assert_held(&i915->drm.struct_mutex);
+ lockdep_assert_held(&ggtt->vm.mutex);
/* Keep at least one fence available for the display engine. */
count = 0;
- list_for_each_entry(fence, &i915->ggtt.fence_list, link)
- count += !fence->pin_count;
+ list_for_each_entry(fence, &ggtt->fence_list, link)
+ count += !atomic_read(&fence->pin_count);
if (count <= 1)
return ERR_PTR(-ENOSPC);
@@ -431,6 +448,7 @@ struct i915_fence_reg *i915_reserve_fence(struct drm_i915_private *i915)
}
list_del(&fence->link);
+
return fence;
}
@@ -442,9 +460,11 @@ struct i915_fence_reg *i915_reserve_fence(struct drm_i915_private *i915)
*/
void i915_unreserve_fence(struct i915_fence_reg *fence)
{
- lockdep_assert_held(&fence->i915->drm.struct_mutex);
+ struct i915_ggtt *ggtt = &fence->i915->ggtt;
+
+ lockdep_assert_held(&ggtt->vm.mutex);
- list_add(&fence->link, &fence->i915->ggtt.fence_list);
+ list_add(&fence->link, &ggtt->fence_list);
}
/**
@@ -834,3 +854,35 @@ void i915_ggtt_init_fences(struct i915_ggtt *ggtt)
i915_gem_restore_fences(i915);
}
+
+void intel_gt_init_swizzling(struct intel_gt *gt)
+{
+ struct drm_i915_private *i915 = gt->i915;
+ struct intel_uncore *uncore = gt->uncore;
+
+ if (INTEL_GEN(i915) < 5 ||
+ i915->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
+ return;
+
+ intel_uncore_rmw(uncore, DISP_ARB_CTL, 0, DISP_TILE_SURFACE_SWIZZLING);
+
+ if (IS_GEN(i915, 5))
+ return;
+
+ intel_uncore_rmw(uncore, TILECTL, 0, TILECTL_SWZCTL);
+
+ if (IS_GEN(i915, 6))
+ intel_uncore_write(uncore,
+ ARB_MODE,
+ _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
+ else if (IS_GEN(i915, 7))
+ intel_uncore_write(uncore,
+ ARB_MODE,
+ _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
+ else if (IS_GEN(i915, 8))
+ intel_uncore_write(uncore,
+ GAMTARBMODE,
+ _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_BDW));
+ else
+ MISSING_CASE(INTEL_GEN(i915));
+}
diff --git a/drivers/gpu/drm/i915/i915_gem_fence_reg.h b/drivers/gpu/drm/i915/i915_gem_fence_reg.h
index d2da98828179..99866fb9d94f 100644
--- a/drivers/gpu/drm/i915/i915_gem_fence_reg.h
+++ b/drivers/gpu/drm/i915/i915_gem_fence_reg.h
@@ -32,6 +32,7 @@ struct drm_i915_gem_object;
struct drm_i915_private;
struct i915_ggtt;
struct i915_vma;
+struct intel_gt;
struct sg_table;
#define I965_FENCE_PAGE 4096UL
@@ -40,7 +41,7 @@ struct i915_fence_reg {
struct list_head link;
struct drm_i915_private *i915;
struct i915_vma *vma;
- int pin_count;
+ atomic_t pin_count;
int id;
/**
* Whether the tiling parameters for the currently
@@ -66,4 +67,6 @@ void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj,
void i915_ggtt_init_fences(struct i915_ggtt *ggtt);
+void intel_gt_init_swizzling(struct intel_gt *gt);
+
#endif
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 7015a97b1097..b1a7a8b9b46a 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -32,19 +32,26 @@
#include <linux/stop_machine.h>
#include <asm/set_memory.h>
+#include <asm/smp.h>
#include <drm/i915_drm.h>
#include "display/intel_frontbuffer.h"
+#include "gt/intel_gt.h"
#include "i915_drv.h"
#include "i915_scatterlist.h"
#include "i915_trace.h"
#include "i915_vgpu.h"
-#include "intel_drv.h"
#define I915_GFP_ALLOW_FAIL (GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN)
+#if IS_ENABLED(CONFIG_DRM_I915_TRACE_GTT)
+#define DBG(...) trace_printk(__VA_ARGS__)
+#else
+#define DBG(...)
+#endif
+
/**
* DOC: Global GTT views
*
@@ -106,12 +113,14 @@
*
*/
+#define as_pd(x) container_of((x), typeof(struct i915_page_directory), pt)
+
static int
i915_get_ggtt_vma_pages(struct i915_vma *vma);
-static void gen6_ggtt_invalidate(struct drm_i915_private *i915)
+static void gen6_ggtt_invalidate(struct i915_ggtt *ggtt)
{
- struct intel_uncore *uncore = &i915->uncore;
+ struct intel_uncore *uncore = ggtt->vm.gt->uncore;
/*
* Note that as an uncached mmio write, this will flush the
@@ -120,24 +129,19 @@ static void gen6_ggtt_invalidate(struct drm_i915_private *i915)
intel_uncore_write_fw(uncore, GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
}
-static void guc_ggtt_invalidate(struct drm_i915_private *i915)
+static void guc_ggtt_invalidate(struct i915_ggtt *ggtt)
{
- struct intel_uncore *uncore = &i915->uncore;
+ struct intel_uncore *uncore = ggtt->vm.gt->uncore;
- gen6_ggtt_invalidate(i915);
+ gen6_ggtt_invalidate(ggtt);
intel_uncore_write_fw(uncore, GEN8_GTCR, GEN8_GTCR_INVALIDATE);
}
-static void gmch_ggtt_invalidate(struct drm_i915_private *i915)
+static void gmch_ggtt_invalidate(struct i915_ggtt *ggtt)
{
intel_gtt_chipset_flush();
}
-static inline void i915_ggtt_invalidate(struct drm_i915_private *i915)
-{
- i915->ggtt.invalidate(i915);
-}
-
static int ppgtt_bind_vma(struct i915_vma *vma,
enum i915_cache_level cache_level,
u32 unused)
@@ -215,10 +219,10 @@ static u64 gen8_pte_encode(dma_addr_t addr,
return pte;
}
-static gen8_pde_t gen8_pde_encode(const dma_addr_t addr,
- const enum i915_cache_level level)
+static u64 gen8_pde_encode(const dma_addr_t addr,
+ const enum i915_cache_level level)
{
- gen8_pde_t pde = _PAGE_PRESENT | _PAGE_RW;
+ u64 pde = _PAGE_PRESENT | _PAGE_RW;
pde |= addr;
if (level != I915_CACHE_NONE)
pde |= PPAT_CACHED_PDE;
@@ -227,9 +231,6 @@ static gen8_pde_t gen8_pde_encode(const dma_addr_t addr,
return pde;
}
-#define gen8_pdpe_encode gen8_pde_encode
-#define gen8_pml4e_encode gen8_pde_encode
-
static u64 snb_pte_encode(dma_addr_t addr,
enum i915_cache_level level,
u32 flags)
@@ -482,9 +483,69 @@ static void vm_free_page(struct i915_address_space *vm, struct page *page)
spin_unlock(&vm->free_pages.lock);
}
+static void i915_address_space_fini(struct i915_address_space *vm)
+{
+ spin_lock(&vm->free_pages.lock);
+ if (pagevec_count(&vm->free_pages.pvec))
+ vm_free_pages_release(vm, true);
+ GEM_BUG_ON(pagevec_count(&vm->free_pages.pvec));
+ spin_unlock(&vm->free_pages.lock);
+
+ drm_mm_takedown(&vm->mm);
+
+ mutex_destroy(&vm->mutex);
+}
+
+static void ppgtt_destroy_vma(struct i915_address_space *vm)
+{
+ struct list_head *phases[] = {
+ &vm->bound_list,
+ &vm->unbound_list,
+ NULL,
+ }, **phase;
+
+ mutex_lock(&vm->i915->drm.struct_mutex);
+ for (phase = phases; *phase; phase++) {
+ struct i915_vma *vma, *vn;
+
+ list_for_each_entry_safe(vma, vn, *phase, vm_link)
+ i915_vma_destroy(vma);
+ }
+ mutex_unlock(&vm->i915->drm.struct_mutex);
+}
+
+static void __i915_vm_release(struct work_struct *work)
+{
+ struct i915_address_space *vm =
+ container_of(work, struct i915_address_space, rcu.work);
+
+ ppgtt_destroy_vma(vm);
+
+ GEM_BUG_ON(!list_empty(&vm->bound_list));
+ GEM_BUG_ON(!list_empty(&vm->unbound_list));
+
+ vm->cleanup(vm);
+ i915_address_space_fini(vm);
+
+ kfree(vm);
+}
+
+void i915_vm_release(struct kref *kref)
+{
+ struct i915_address_space *vm =
+ container_of(kref, struct i915_address_space, ref);
+
+ GEM_BUG_ON(i915_is_ggtt(vm));
+ trace_i915_ppgtt_release(vm);
+
+ vm->closed = true;
+ queue_rcu_work(vm->i915->wq, &vm->rcu);
+}
+
static void i915_address_space_init(struct i915_address_space *vm, int subclass)
{
kref_init(&vm->ref);
+ INIT_RCU_WORK(&vm->rcu, __i915_vm_release);
/*
* The vm->mutex must be reclaim safe (for use in the shrinker).
@@ -505,19 +566,6 @@ static void i915_address_space_init(struct i915_address_space *vm, int subclass)
INIT_LIST_HEAD(&vm->bound_list);
}
-static void i915_address_space_fini(struct i915_address_space *vm)
-{
- spin_lock(&vm->free_pages.lock);
- if (pagevec_count(&vm->free_pages.pvec))
- vm_free_pages_release(vm, true);
- GEM_BUG_ON(pagevec_count(&vm->free_pages.pvec));
- spin_unlock(&vm->free_pages.lock);
-
- drm_mm_takedown(&vm->mm);
-
- mutex_destroy(&vm->mutex);
-}
-
static int __setup_page_dma(struct i915_address_space *vm,
struct i915_page_dma *p,
gfp_t gfp)
@@ -554,28 +602,17 @@ static void cleanup_page_dma(struct i915_address_space *vm,
#define kmap_atomic_px(px) kmap_atomic(px_base(px)->page)
-#define setup_px(vm, px) setup_page_dma((vm), px_base(px))
-#define cleanup_px(vm, px) cleanup_page_dma((vm), px_base(px))
-#define fill_px(vm, px, v) fill_page_dma((vm), px_base(px), (v))
-#define fill32_px(vm, px, v) fill_page_dma_32((vm), px_base(px), (v))
-
-static void fill_page_dma(struct i915_address_space *vm,
- struct i915_page_dma *p,
- const u64 val)
+static void
+fill_page_dma(const struct i915_page_dma *p, const u64 val, unsigned int count)
{
- u64 * const vaddr = kmap_atomic(p->page);
-
- memset64(vaddr, val, PAGE_SIZE / sizeof(val));
-
- kunmap_atomic(vaddr);
+ kunmap_atomic(memset64(kmap_atomic(p->page), val, count));
}
-static void fill_page_dma_32(struct i915_address_space *vm,
- struct i915_page_dma *p,
- const u32 v)
-{
- fill_page_dma(vm, p, (u64)v << 32 | v);
-}
+#define fill_px(px, v) fill_page_dma(px_base(px), (v), PAGE_SIZE / sizeof(u64))
+#define fill32_px(px, v) do { \
+ u64 v__ = lower_32_bits(v); \
+ fill_px((px), v__ << 32 | v__); \
+} while (0)
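
The fill32_px() wrapper above widens a 32-bit scratch PTE into a 64-bit pattern so a single memset64() pass serves both PTE widths. A standalone sketch of the replication trick (illustrative only, not part of the patch):

#include <inttypes.h>
#include <stdio.h>

int main(void)
{
	uint32_t pte = 0xdeadbeef;		/* a 32-bit scratch PTE */
	uint64_t pattern = (uint64_t)pte << 32 | pte;

	printf("%#" PRIx64 "\n", pattern);	/* 0xdeadbeefdeadbeef */
	return 0;
}
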
static int
setup_scratch_page(struct i915_address_space *vm, gfp_t gfp)
@@ -602,7 +639,7 @@ setup_scratch_page(struct i915_address_space *vm, gfp_t gfp)
gfp |= __GFP_ZERO | __GFP_RETRY_MAYFAIL;
do {
- int order = get_order(size);
+ unsigned int order = get_order(size);
struct page *page;
dma_addr_t addr;
@@ -621,8 +658,8 @@ setup_scratch_page(struct i915_address_space *vm, gfp_t gfp)
if (unlikely(!IS_ALIGNED(addr, size)))
goto unmap_page;
- vm->scratch_page.page = page;
- vm->scratch_page.daddr = addr;
+ vm->scratch[0].base.page = page;
+ vm->scratch[0].base.daddr = addr;
vm->scratch_order = order;
return 0;
@@ -641,14 +678,30 @@ skip:
static void cleanup_scratch_page(struct i915_address_space *vm)
{
- struct i915_page_dma *p = &vm->scratch_page;
- int order = vm->scratch_order;
+ struct i915_page_dma *p = px_base(&vm->scratch[0]);
+ unsigned int order = vm->scratch_order;
dma_unmap_page(vm->dma, p->daddr, BIT(order) << PAGE_SHIFT,
PCI_DMA_BIDIRECTIONAL);
__free_pages(p->page, order);
}
+static void free_scratch(struct i915_address_space *vm)
+{
+ int i;
+
+ if (!px_dma(&vm->scratch[0])) /* set to 0 on clones */
+ return;
+
+ for (i = 1; i <= vm->top; i++) {
+ if (!px_dma(&vm->scratch[i]))
+ break;
+ cleanup_page_dma(vm, px_base(&vm->scratch[i]));
+ }
+
+ cleanup_scratch_page(vm);
+}
+
static struct i915_page_table *alloc_pt(struct i915_address_space *vm)
{
struct i915_page_table *pt;
@@ -657,50 +710,24 @@ static struct i915_page_table *alloc_pt(struct i915_address_space *vm)
if (unlikely(!pt))
return ERR_PTR(-ENOMEM);
- if (unlikely(setup_px(vm, pt))) {
+ if (unlikely(setup_page_dma(vm, &pt->base))) {
kfree(pt);
return ERR_PTR(-ENOMEM);
}
atomic_set(&pt->used, 0);
-
return pt;
}
-static void free_pt(struct i915_address_space *vm, struct i915_page_table *pt)
-{
- cleanup_px(vm, pt);
- kfree(pt);
-}
-
-static void gen8_initialize_pt(struct i915_address_space *vm,
- struct i915_page_table *pt)
-{
- fill_px(vm, pt, vm->scratch_pte);
-}
-
-static void gen6_initialize_pt(struct i915_address_space *vm,
- struct i915_page_table *pt)
-{
- fill32_px(vm, pt, vm->scratch_pte);
-}
-
-static struct i915_page_directory *__alloc_pd(void)
+static struct i915_page_directory *__alloc_pd(size_t sz)
{
struct i915_page_directory *pd;
- pd = kmalloc(sizeof(*pd), I915_GFP_ALLOW_FAIL);
-
+ pd = kzalloc(sz, I915_GFP_ALLOW_FAIL);
if (unlikely(!pd))
return NULL;
- memset(&pd->base, 0, sizeof(pd->base));
- atomic_set(&pd->used, 0);
spin_lock_init(&pd->lock);
-
- /* for safety */
- pd->entry[0] = NULL;
-
return pd;
}
@@ -708,11 +735,11 @@ static struct i915_page_directory *alloc_pd(struct i915_address_space *vm)
{
struct i915_page_directory *pd;
- pd = __alloc_pd();
+ pd = __alloc_pd(sizeof(*pd));
if (unlikely(!pd))
return ERR_PTR(-ENOMEM);
- if (unlikely(setup_px(vm, pd))) {
+ if (unlikely(setup_page_dma(vm, px_base(pd)))) {
kfree(pd);
return ERR_PTR(-ENOMEM);
}
@@ -720,36 +747,73 @@ static struct i915_page_directory *alloc_pd(struct i915_address_space *vm)
return pd;
}
-static inline bool pd_has_phys_page(const struct i915_page_directory * const pd)
+static void free_pd(struct i915_address_space *vm, struct i915_page_dma *pd)
{
- return pd->base.page;
+ cleanup_page_dma(vm, pd);
+ kfree(pd);
}
-static void free_pd(struct i915_address_space *vm,
- struct i915_page_directory *pd)
+#define free_px(vm, px) free_pd(vm, px_base(px))
+
+static inline void
+write_dma_entry(struct i915_page_dma * const pdma,
+ const unsigned short idx,
+ const u64 encoded_entry)
{
- if (likely(pd_has_phys_page(pd)))
- cleanup_px(vm, pd);
+ u64 * const vaddr = kmap_atomic(pdma->page);
- kfree(pd);
+ vaddr[idx] = encoded_entry;
+ kunmap_atomic(vaddr);
}
-static void init_pd_with_page(struct i915_address_space *vm,
- struct i915_page_directory * const pd,
- struct i915_page_table *pt)
+static inline void
+__set_pd_entry(struct i915_page_directory * const pd,
+ const unsigned short idx,
+ struct i915_page_dma * const to,
+ u64 (*encode)(const dma_addr_t, const enum i915_cache_level))
{
- fill_px(vm, pd, gen8_pde_encode(px_dma(pt), I915_CACHE_LLC));
- memset_p(pd->entry, pt, 512);
+ /* Each thread pre-pins the pd, and we may have a thread per pde. */
+ GEM_BUG_ON(atomic_read(px_used(pd)) > 2 * ARRAY_SIZE(pd->entry));
+
+ atomic_inc(px_used(pd));
+ pd->entry[idx] = to;
+ write_dma_entry(px_base(pd), idx, encode(to->daddr, I915_CACHE_LLC));
}
-static void init_pd(struct i915_address_space *vm,
- struct i915_page_directory * const pd,
- struct i915_page_directory * const to)
+#define set_pd_entry(pd, idx, to) \
+ __set_pd_entry((pd), (idx), px_base(to), gen8_pde_encode)
+
+static inline void
+clear_pd_entry(struct i915_page_directory * const pd,
+ const unsigned short idx,
+ const struct i915_page_scratch * const scratch)
{
- GEM_DEBUG_BUG_ON(!pd_has_phys_page(pd));
+ GEM_BUG_ON(atomic_read(px_used(pd)) == 0);
- fill_px(vm, pd, gen8_pdpe_encode(px_dma(to), I915_CACHE_LLC));
- memset_p(pd->entry, to, 512);
+ write_dma_entry(px_base(pd), idx, scratch->encode);
+ pd->entry[idx] = NULL;
+ atomic_dec(px_used(pd));
+}
+
+static bool
+release_pd_entry(struct i915_page_directory * const pd,
+ const unsigned short idx,
+ struct i915_page_table * const pt,
+ const struct i915_page_scratch * const scratch)
+{
+ bool free = false;
+
+ if (atomic_add_unless(&pt->used, -1, 1))
+ return false;
+
+ spin_lock(&pd->lock);
+ if (atomic_dec_and_test(&pt->used)) {
+ clear_pd_entry(pd, idx, scratch);
+ free = true;
+ }
+ spin_unlock(&pd->lock);
+
+ return free;
}
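
release_pd_entry() above follows the common lockless-fastpath refcounting idiom: atomic_add_unless() drops a reference without taking pd->lock unless it would be the final one, and only the final put retries under the lock so the entry is cleared exactly once. A userspace sketch of the same shape, using C11 atomics and hypothetical names:

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>

static bool put_ref(atomic_int *used, pthread_mutex_t *lock)
{
	int old = atomic_load(used);
	bool last = false;

	/* fast path: atomic_add_unless(used, -1, 1) */
	while (old > 1)
		if (atomic_compare_exchange_weak(used, &old, old - 1))
			return false;	/* not the last reference */

	/* slow path: the final put happens under the lock */
	pthread_mutex_lock(lock);
	if (atomic_fetch_sub(used, 1) == 1) {
		/* clear_pd_entry() would run here, exactly once */
		last = true;
	}
	pthread_mutex_unlock(lock);
	return last;
}
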
/*
@@ -763,165 +827,331 @@ static void mark_tlbs_dirty(struct i915_ppgtt *ppgtt)
ppgtt->pd_dirty_engines = ALL_ENGINES;
}
-/* Removes entries from a single page table, releasing it if it's empty.
- * Caller can use the return value to update higher-level entries.
- */
-static bool gen8_ppgtt_clear_pt(const struct i915_address_space *vm,
- struct i915_page_table *pt,
- u64 start, u64 length)
+static void gen8_ppgtt_notify_vgt(struct i915_ppgtt *ppgtt, bool create)
{
- unsigned int num_entries = gen8_pte_count(start, length);
- gen8_pte_t *vaddr;
+ struct drm_i915_private *dev_priv = ppgtt->vm.i915;
+ enum vgt_g2v_type msg;
+ int i;
- vaddr = kmap_atomic_px(pt);
- memset64(vaddr + gen8_pte_index(start), vm->scratch_pte, num_entries);
- kunmap_atomic(vaddr);
+ if (create)
+ atomic_inc(px_used(ppgtt->pd)); /* never remove */
+ else
+ atomic_dec(px_used(ppgtt->pd));
+
+ mutex_lock(&dev_priv->vgpu.lock);
+
+ if (i915_vm_is_4lvl(&ppgtt->vm)) {
+ const u64 daddr = px_dma(ppgtt->pd);
+
+ I915_WRITE(vgtif_reg(pdp[0].lo), lower_32_bits(daddr));
+ I915_WRITE(vgtif_reg(pdp[0].hi), upper_32_bits(daddr));
+
+ msg = (create ? VGT_G2V_PPGTT_L4_PAGE_TABLE_CREATE :
+ VGT_G2V_PPGTT_L4_PAGE_TABLE_DESTROY);
+ } else {
+ for (i = 0; i < GEN8_3LVL_PDPES; i++) {
+ const u64 daddr = i915_page_dir_dma_addr(ppgtt, i);
+
+ I915_WRITE(vgtif_reg(pdp[i].lo), lower_32_bits(daddr));
+ I915_WRITE(vgtif_reg(pdp[i].hi), upper_32_bits(daddr));
+ }
+
+ msg = (create ? VGT_G2V_PPGTT_L3_PAGE_TABLE_CREATE :
+ VGT_G2V_PPGTT_L3_PAGE_TABLE_DESTROY);
+ }
+
+ /* g2v_notify atomically (via hv trap) consumes the message packet. */
+ I915_WRITE(vgtif_reg(g2v_notify), msg);
- GEM_BUG_ON(num_entries > atomic_read(&pt->used));
- return !atomic_sub_return(num_entries, &pt->used);
+ mutex_unlock(&dev_priv->vgpu.lock);
}
-static void gen8_ppgtt_set_pde(struct i915_address_space *vm,
- struct i915_page_directory *pd,
- struct i915_page_table *pt,
- unsigned int pde)
+/* Index shifts into the pagetable are offset by GEN8_PTE_SHIFT [12] */
+#define GEN8_PAGE_SIZE (SZ_4K) /* page and page-directory sizes are the same */
+#define GEN8_PTE_SHIFT (ilog2(GEN8_PAGE_SIZE))
+#define GEN8_PDES (GEN8_PAGE_SIZE / sizeof(u64))
+#define gen8_pd_shift(lvl) ((lvl) * ilog2(GEN8_PDES))
+#define gen8_pd_index(i, lvl) i915_pde_index((i), gen8_pd_shift(lvl))
+#define __gen8_pte_shift(lvl) (GEN8_PTE_SHIFT + gen8_pd_shift(lvl))
+#define __gen8_pte_index(a, lvl) i915_pde_index((a), __gen8_pte_shift(lvl))
+
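With GEN8_PAGE_SIZE = 4K and GEN8_PDES = 512, each level consumes ilog2(512) = 9 bits of the page index, so __gen8_pte_index() effectively decomposes an address at bits 12, 21, 30 and 39. A worked standalone example mirroring the macros above (hypothetical helper names):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12
#define PDES 512			/* 4096 / sizeof(u64) */

/* mirrors gen8_pd_index(): 9 index bits per level of the page index */
static unsigned int pd_index(uint64_t idx, int lvl)
{
	return (idx >> (9 * lvl)) & (PDES - 1);
}

int main(void)
{
	uint64_t addr = 0x7f1234567000ull;
	uint64_t idx = addr >> PAGE_SHIFT;	/* page index of the address */
	int lvl;

	for (lvl = 3; lvl >= 0; lvl--)		/* pml4e, pdpe, pde, pte */
		printf("lvl %d -> %u\n", lvl, pd_index(idx, lvl));
	return 0;
}
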
+static inline unsigned int
+gen8_pd_range(u64 start, u64 end, int lvl, unsigned int *idx)
{
- gen8_pde_t *vaddr;
+ const int shift = gen8_pd_shift(lvl);
+ const u64 mask = ~0ull << gen8_pd_shift(lvl + 1);
- vaddr = kmap_atomic_px(pd);
- vaddr[pde] = gen8_pde_encode(px_dma(pt), I915_CACHE_LLC);
- kunmap_atomic(vaddr);
+ GEM_BUG_ON(start >= end);
+ end += ~mask >> gen8_pd_shift(1);
+
+ *idx = i915_pde_index(start, shift);
+ if ((start ^ end) & mask)
+ return GEN8_PDES - *idx;
+ else
+ return i915_pde_index(end, shift) - *idx;
}
-static bool gen8_ppgtt_clear_pd(struct i915_address_space *vm,
- struct i915_page_directory *pd,
- u64 start, u64 length)
+static inline bool gen8_pd_contains(u64 start, u64 end, int lvl)
{
- struct i915_page_table *pt;
- u32 pde;
+ const u64 mask = ~0ull << gen8_pd_shift(lvl + 1);
- gen8_for_each_pde(pt, pd, start, length, pde) {
- bool free = false;
+ GEM_BUG_ON(start >= end);
+ return (start ^ end) & mask && (start & ~mask) == 0;
+}
- GEM_BUG_ON(pt == vm->scratch_pt);
+static inline unsigned int gen8_pt_count(u64 start, u64 end)
+{
+ GEM_BUG_ON(start >= end);
+ if ((start ^ end) >> gen8_pd_shift(1))
+ return GEN8_PDES - (start & (GEN8_PDES - 1));
+ else
+ return end - start;
+}
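
gen8_pt_count() clamps a page-index range to the current page table: if [start, end) crosses a 512-entry boundary it returns only the entries up to that boundary, otherwise the exact count. A quick standalone check of both cases (same logic, with / and % in place of shifts):

#include <stdio.h>

#define PDES 512

static unsigned int pt_count(unsigned long long start, unsigned long long end)
{
	if ((start ^ end) / PDES)	/* range spans a page-table boundary */
		return PDES - (start % PDES);
	return end - start;
}

int main(void)
{
	printf("%u\n", pt_count(510, 1030));	/* 2: clamped at index 512 */
	printf("%u\n", pt_count(16, 48));	/* 32: fits in one page table */
	return 0;
}
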
- if (!gen8_ppgtt_clear_pt(vm, pt, start, length))
- continue;
+static inline unsigned int gen8_pd_top_count(const struct i915_address_space *vm)
+{
+ unsigned int shift = __gen8_pte_shift(vm->top);
+ return (vm->total + (1ull << shift) - 1) >> shift;
+}
- spin_lock(&pd->lock);
- if (!atomic_read(&pt->used)) {
- gen8_ppgtt_set_pde(vm, pd, vm->scratch_pt, pde);
- pd->entry[pde] = vm->scratch_pt;
+static inline struct i915_page_directory *
+gen8_pdp_for_page_index(struct i915_address_space * const vm, const u64 idx)
+{
+ struct i915_ppgtt * const ppgtt = i915_vm_to_ppgtt(vm);
- GEM_BUG_ON(!atomic_read(&pd->used));
- atomic_dec(&pd->used);
- free = true;
- }
- spin_unlock(&pd->lock);
- if (free)
- free_pt(vm, pt);
+ if (vm->top == 2)
+ return ppgtt->pd;
+ else
+ return i915_pd_entry(ppgtt->pd, gen8_pd_index(idx, vm->top));
+}
+
+static inline struct i915_page_directory *
+gen8_pdp_for_page_address(struct i915_address_space * const vm, const u64 addr)
+{
+ return gen8_pdp_for_page_index(vm, addr >> GEN8_PTE_SHIFT);
+}
+
+static void __gen8_ppgtt_cleanup(struct i915_address_space *vm,
+ struct i915_page_directory *pd,
+ int count, int lvl)
+{
+ if (lvl) {
+ void **pde = pd->entry;
+
+ do {
+ if (!*pde)
+ continue;
+
+ __gen8_ppgtt_cleanup(vm, *pde, GEN8_PDES, lvl - 1);
+ } while (pde++, --count);
}
- return !atomic_read(&pd->used);
+ free_px(vm, pd);
}
-static void gen8_ppgtt_set_pdpe(struct i915_page_directory *pdp,
- struct i915_page_directory *pd,
- unsigned int pdpe)
+static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
{
- gen8_ppgtt_pdpe_t *vaddr;
+ struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
- if (!pd_has_phys_page(pdp))
- return;
+ if (intel_vgpu_active(vm->i915))
+ gen8_ppgtt_notify_vgt(ppgtt, false);
- vaddr = kmap_atomic_px(pdp);
- vaddr[pdpe] = gen8_pdpe_encode(px_dma(pd), I915_CACHE_LLC);
- kunmap_atomic(vaddr);
+ __gen8_ppgtt_cleanup(vm, ppgtt->pd, gen8_pd_top_count(vm), vm->top);
+ free_scratch(vm);
}
-/* Removes entries from a single page dir pointer, releasing it if it's empty.
- * Caller can use the return value to update higher-level entries
- */
-static bool gen8_ppgtt_clear_pdp(struct i915_address_space *vm,
- struct i915_page_directory * const pdp,
- u64 start, u64 length)
+static u64 __gen8_ppgtt_clear(struct i915_address_space * const vm,
+ struct i915_page_directory * const pd,
+ u64 start, const u64 end, int lvl)
{
- struct i915_page_directory *pd;
- unsigned int pdpe;
+ const struct i915_page_scratch * const scratch = &vm->scratch[lvl];
+ unsigned int idx, len;
- gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
- bool free = false;
+ GEM_BUG_ON(end > vm->total >> GEN8_PTE_SHIFT);
- GEM_BUG_ON(pd == vm->scratch_pd);
+ len = gen8_pd_range(start, end, lvl--, &idx);
+ DBG("%s(%p):{ lvl:%d, start:%llx, end:%llx, idx:%d, len:%d, used:%d }\n",
+ __func__, vm, lvl + 1, start, end,
+ idx, len, atomic_read(px_used(pd)));
+ GEM_BUG_ON(!len || len >= atomic_read(px_used(pd)));
- if (!gen8_ppgtt_clear_pd(vm, pd, start, length))
+ do {
+ struct i915_page_table *pt = pd->entry[idx];
+
+ if (atomic_fetch_inc(&pt->used) >> gen8_pd_shift(1) &&
+ gen8_pd_contains(start, end, lvl)) {
+ DBG("%s(%p):{ lvl:%d, idx:%d, start:%llx, end:%llx } removing pd\n",
+ __func__, vm, lvl + 1, idx, start, end);
+ clear_pd_entry(pd, idx, scratch);
+ __gen8_ppgtt_cleanup(vm, as_pd(pt), I915_PDES, lvl);
+ start += (u64)I915_PDES << gen8_pd_shift(lvl);
continue;
+ }
+
+ if (lvl) {
+ start = __gen8_ppgtt_clear(vm, as_pd(pt),
+ start, end, lvl);
+ } else {
+ unsigned int count;
+ u64 *vaddr;
- spin_lock(&pdp->lock);
- if (!atomic_read(&pd->used)) {
- gen8_ppgtt_set_pdpe(pdp, vm->scratch_pd, pdpe);
- pdp->entry[pdpe] = vm->scratch_pd;
+ count = gen8_pt_count(start, end);
+ DBG("%s(%p):{ lvl:%d, start:%llx, end:%llx, idx:%d, len:%d, used:%d } removing pte\n",
+ __func__, vm, lvl, start, end,
+ gen8_pd_index(start, 0), count,
+ atomic_read(&pt->used));
+ GEM_BUG_ON(!count || count >= atomic_read(&pt->used));
- GEM_BUG_ON(!atomic_read(&pdp->used));
- atomic_dec(&pdp->used);
- free = true;
+ vaddr = kmap_atomic_px(pt);
+ memset64(vaddr + gen8_pd_index(start, 0),
+ vm->scratch[0].encode,
+ count);
+ kunmap_atomic(vaddr);
+
+ atomic_sub(count, &pt->used);
+ start += count;
}
- spin_unlock(&pdp->lock);
- if (free)
- free_pd(vm, pd);
- }
- return !atomic_read(&pdp->used);
+ if (release_pd_entry(pd, idx, pt, scratch))
+ free_px(vm, pt);
+ } while (idx++, --len);
+
+ return start;
}
-static void gen8_ppgtt_clear_3lvl(struct i915_address_space *vm,
- u64 start, u64 length)
+static void gen8_ppgtt_clear(struct i915_address_space *vm,
+ u64 start, u64 length)
{
- gen8_ppgtt_clear_pdp(vm, i915_vm_to_ppgtt(vm)->pd, start, length);
+ GEM_BUG_ON(!IS_ALIGNED(start, BIT_ULL(GEN8_PTE_SHIFT)));
+ GEM_BUG_ON(!IS_ALIGNED(length, BIT_ULL(GEN8_PTE_SHIFT)));
+ GEM_BUG_ON(range_overflows(start, length, vm->total));
+
+ start >>= GEN8_PTE_SHIFT;
+ length >>= GEN8_PTE_SHIFT;
+ GEM_BUG_ON(length == 0);
+
+ __gen8_ppgtt_clear(vm, i915_vm_to_ppgtt(vm)->pd,
+ start, start + length, vm->top);
}
-static void gen8_ppgtt_set_pml4e(struct i915_page_directory *pml4,
- struct i915_page_directory *pdp,
- unsigned int pml4e)
+static int __gen8_ppgtt_alloc(struct i915_address_space * const vm,
+ struct i915_page_directory * const pd,
+ u64 * const start, const u64 end, int lvl)
{
- gen8_ppgtt_pml4e_t *vaddr;
+ const struct i915_page_scratch * const scratch = &vm->scratch[lvl];
+ struct i915_page_table *alloc = NULL;
+ unsigned int idx, len;
+ int ret = 0;
- vaddr = kmap_atomic_px(pml4);
- vaddr[pml4e] = gen8_pml4e_encode(px_dma(pdp), I915_CACHE_LLC);
- kunmap_atomic(vaddr);
+ GEM_BUG_ON(end > vm->total >> GEN8_PTE_SHIFT);
+
+ len = gen8_pd_range(*start, end, lvl--, &idx);
+ DBG("%s(%p):{ lvl:%d, start:%llx, end:%llx, idx:%d, len:%d, used:%d }\n",
+ __func__, vm, lvl + 1, *start, end,
+ idx, len, atomic_read(px_used(pd)));
+ GEM_BUG_ON(!len || (idx + len - 1) >> gen8_pd_shift(1));
+
+ spin_lock(&pd->lock);
+ GEM_BUG_ON(!atomic_read(px_used(pd))); /* Must be pinned! */
+ do {
+ struct i915_page_table *pt = pd->entry[idx];
+
+ if (!pt) {
+ spin_unlock(&pd->lock);
+
+ DBG("%s(%p):{ lvl:%d, idx:%d } allocating new tree\n",
+ __func__, vm, lvl + 1, idx);
+
+ pt = fetch_and_zero(&alloc);
+ if (lvl) {
+ if (!pt) {
+ pt = &alloc_pd(vm)->pt;
+ if (IS_ERR(pt)) {
+ ret = PTR_ERR(pt);
+ goto out;
+ }
+ }
+
+ fill_px(pt, vm->scratch[lvl].encode);
+ } else {
+ if (!pt) {
+ pt = alloc_pt(vm);
+ if (IS_ERR(pt)) {
+ ret = PTR_ERR(pt);
+ goto out;
+ }
+ }
+
+ if (intel_vgpu_active(vm->i915) ||
+ gen8_pt_count(*start, end) < I915_PDES)
+ fill_px(pt, vm->scratch[lvl].encode);
+ }
+
+ spin_lock(&pd->lock);
+ if (likely(!pd->entry[idx]))
+ set_pd_entry(pd, idx, pt);
+ else
+ alloc = pt, pt = pd->entry[idx];
+ }
+
+ if (lvl) {
+ atomic_inc(&pt->used);
+ spin_unlock(&pd->lock);
+
+ ret = __gen8_ppgtt_alloc(vm, as_pd(pt),
+ start, end, lvl);
+ if (unlikely(ret)) {
+ if (release_pd_entry(pd, idx, pt, scratch))
+ free_px(vm, pt);
+ goto out;
+ }
+
+ spin_lock(&pd->lock);
+ atomic_dec(&pt->used);
+ GEM_BUG_ON(!atomic_read(&pt->used));
+ } else {
+ unsigned int count = gen8_pt_count(*start, end);
+
+ DBG("%s(%p):{ lvl:%d, start:%llx, end:%llx, idx:%d, len:%d, used:%d } inserting pte\n",
+ __func__, vm, lvl, *start, end,
+ gen8_pd_index(*start, 0), count,
+ atomic_read(&pt->used));
+
+ atomic_add(count, &pt->used);
+ /* All other pdes may be simultaneously removed */
+ GEM_BUG_ON(atomic_read(&pt->used) > 2 * I915_PDES);
+ *start += count;
+ }
+ } while (idx++, --len);
+ spin_unlock(&pd->lock);
+out:
+ if (alloc)
+ free_px(vm, alloc);
+ return ret;
}
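
__gen8_ppgtt_alloc() above uses an optimistic allocation pattern: the spinlock is dropped while a new table is allocated, and if another thread installed an entry in the meantime the fresh allocation is kept in 'alloc' as a spare for the next miss (and freed at 'out' if never used). Reduced to a standalone shape with hypothetical names:

#include <pthread.h>
#include <stdlib.h>

struct node { int payload; };

static struct node *get_or_alloc(struct node **slot, struct node **spare,
				 pthread_mutex_t *lock)
{
	struct node *n;

	pthread_mutex_lock(lock);
	if (!(n = *slot)) {
		pthread_mutex_unlock(lock);	/* never allocate under the lock */

		n = *spare ? *spare : malloc(sizeof(*n));
		*spare = NULL;			/* fetch_and_zero(&alloc) */
		if (!n)
			return NULL;

		pthread_mutex_lock(lock);
		if (!*slot) {
			*slot = n;		/* won the race: publish */
		} else {
			*spare = n;		/* lost: keep as spare */
			n = *slot;
		}
	}
	pthread_mutex_unlock(lock);
	return n;
}
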
-/* Removes entries from a single pml4.
- * This is the top-level structure in 4-level page tables used on gen8+.
- * Empty entries are always scratch pml4e.
- */
-static void gen8_ppgtt_clear_4lvl(struct i915_address_space *vm,
- u64 start, u64 length)
+static int gen8_ppgtt_alloc(struct i915_address_space *vm,
+ u64 start, u64 length)
{
- struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
- struct i915_page_directory * const pml4 = ppgtt->pd;
- struct i915_page_directory *pdp;
- unsigned int pml4e;
+ u64 from;
+ int err;
- GEM_BUG_ON(!i915_vm_is_4lvl(vm));
+ GEM_BUG_ON(!IS_ALIGNED(start, BIT_ULL(GEN8_PTE_SHIFT)));
+ GEM_BUG_ON(!IS_ALIGNED(length, BIT_ULL(GEN8_PTE_SHIFT)));
+ GEM_BUG_ON(range_overflows(start, length, vm->total));
- gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) {
- bool free = false;
- GEM_BUG_ON(pdp == vm->scratch_pdp);
+ start >>= GEN8_PTE_SHIFT;
+ length >>= GEN8_PTE_SHIFT;
+ GEM_BUG_ON(length == 0);
+ from = start;
- if (!gen8_ppgtt_clear_pdp(vm, pdp, start, length))
- continue;
+ err = __gen8_ppgtt_alloc(vm, i915_vm_to_ppgtt(vm)->pd,
+ &start, start + length, vm->top);
+ if (unlikely(err && from != start))
+ __gen8_ppgtt_clear(vm, i915_vm_to_ppgtt(vm)->pd,
+ from, start, vm->top);
- spin_lock(&pml4->lock);
- if (!atomic_read(&pdp->used)) {
- gen8_ppgtt_set_pml4e(pml4, vm->scratch_pdp, pml4e);
- pml4->entry[pml4e] = vm->scratch_pdp;
- free = true;
- }
- spin_unlock(&pml4->lock);
- if (free)
- free_pd(vm, pdp);
- }
+ return err;
}
static inline struct sgt_dma {
@@ -933,47 +1163,28 @@ static inline struct sgt_dma {
return (struct sgt_dma) { sg, addr, addr + sg->length };
}
-struct gen8_insert_pte {
- u16 pml4e;
- u16 pdpe;
- u16 pde;
- u16 pte;
-};
-
-static __always_inline struct gen8_insert_pte gen8_insert_pte(u64 start)
-{
- return (struct gen8_insert_pte) {
- gen8_pml4e_index(start),
- gen8_pdpe_index(start),
- gen8_pde_index(start),
- gen8_pte_index(start),
- };
-}
-
-static __always_inline bool
-gen8_ppgtt_insert_pte_entries(struct i915_ppgtt *ppgtt,
- struct i915_page_directory *pdp,
- struct sgt_dma *iter,
- struct gen8_insert_pte *idx,
- enum i915_cache_level cache_level,
- u32 flags)
+static __always_inline u64
+gen8_ppgtt_insert_pte(struct i915_ppgtt *ppgtt,
+ struct i915_page_directory *pdp,
+ struct sgt_dma *iter,
+ u64 idx,
+ enum i915_cache_level cache_level,
+ u32 flags)
{
struct i915_page_directory *pd;
const gen8_pte_t pte_encode = gen8_pte_encode(0, cache_level, flags);
gen8_pte_t *vaddr;
- bool ret;
- GEM_BUG_ON(idx->pdpe >= i915_pdpes_per_pdp(&ppgtt->vm));
- pd = i915_pd_entry(pdp, idx->pdpe);
- vaddr = kmap_atomic_px(i915_pt_entry(pd, idx->pde));
+ pd = i915_pd_entry(pdp, gen8_pd_index(idx, 2));
+ vaddr = kmap_atomic_px(i915_pt_entry(pd, gen8_pd_index(idx, 1)));
do {
- vaddr[idx->pte] = pte_encode | iter->dma;
+ vaddr[gen8_pd_index(idx, 0)] = pte_encode | iter->dma;
iter->dma += I915_GTT_PAGE_SIZE;
if (iter->dma >= iter->max) {
iter->sg = __sg_next(iter->sg);
if (!iter->sg) {
- ret = false;
+ idx = 0;
break;
}
@@ -981,91 +1192,68 @@ gen8_ppgtt_insert_pte_entries(struct i915_ppgtt *ppgtt,
iter->max = iter->dma + iter->sg->length;
}
- if (++idx->pte == GEN8_PTES) {
- idx->pte = 0;
-
- if (++idx->pde == I915_PDES) {
- idx->pde = 0;
-
+ if (gen8_pd_index(++idx, 0) == 0) {
+ if (gen8_pd_index(idx, 1) == 0) {
/* Limited by sg length for 3lvl */
- if (++idx->pdpe == GEN8_PML4ES_PER_PML4) {
- idx->pdpe = 0;
- ret = true;
+ if (gen8_pd_index(idx, 2) == 0)
break;
- }
- GEM_BUG_ON(idx->pdpe >= i915_pdpes_per_pdp(&ppgtt->vm));
- pd = pdp->entry[idx->pdpe];
+ pd = pdp->entry[gen8_pd_index(idx, 2)];
}
kunmap_atomic(vaddr);
- vaddr = kmap_atomic_px(i915_pt_entry(pd, idx->pde));
+ vaddr = kmap_atomic_px(i915_pt_entry(pd, gen8_pd_index(idx, 1)));
}
} while (1);
kunmap_atomic(vaddr);
- return ret;
+ return idx;
}
-static void gen8_ppgtt_insert_3lvl(struct i915_address_space *vm,
- struct i915_vma *vma,
+static void gen8_ppgtt_insert_huge(struct i915_vma *vma,
+ struct sgt_dma *iter,
enum i915_cache_level cache_level,
u32 flags)
{
- struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
- struct sgt_dma iter = sgt_dma(vma);
- struct gen8_insert_pte idx = gen8_insert_pte(vma->node.start);
-
- gen8_ppgtt_insert_pte_entries(ppgtt, ppgtt->pd, &iter, &idx,
- cache_level, flags);
-
- vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
-}
-
-static void gen8_ppgtt_insert_huge_entries(struct i915_vma *vma,
- struct i915_page_directory *pml4,
- struct sgt_dma *iter,
- enum i915_cache_level cache_level,
- u32 flags)
-{
const gen8_pte_t pte_encode = gen8_pte_encode(0, cache_level, flags);
u64 start = vma->node.start;
dma_addr_t rem = iter->sg->length;
+ GEM_BUG_ON(!i915_vm_is_4lvl(vma->vm));
+
do {
- struct gen8_insert_pte idx = gen8_insert_pte(start);
- struct i915_page_directory *pdp =
- i915_pdp_entry(pml4, idx.pml4e);
- struct i915_page_directory *pd = i915_pd_entry(pdp, idx.pdpe);
- unsigned int page_size;
- bool maybe_64K = false;
+ struct i915_page_directory * const pdp =
+ gen8_pdp_for_page_address(vma->vm, start);
+ struct i915_page_directory * const pd =
+ i915_pd_entry(pdp, __gen8_pte_index(start, 2));
gen8_pte_t encode = pte_encode;
+ unsigned int maybe_64K = -1;
+ unsigned int page_size;
gen8_pte_t *vaddr;
- u16 index, max;
+ u16 index;
if (vma->page_sizes.sg & I915_GTT_PAGE_SIZE_2M &&
IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_2M) &&
- rem >= I915_GTT_PAGE_SIZE_2M && !idx.pte) {
- index = idx.pde;
- max = I915_PDES;
- page_size = I915_GTT_PAGE_SIZE_2M;
-
+ rem >= I915_GTT_PAGE_SIZE_2M &&
+ !__gen8_pte_index(start, 0)) {
+ index = __gen8_pte_index(start, 1);
encode |= GEN8_PDE_PS_2M;
+ page_size = I915_GTT_PAGE_SIZE_2M;
vaddr = kmap_atomic_px(pd);
} else {
- struct i915_page_table *pt = i915_pt_entry(pd, idx.pde);
+ struct i915_page_table *pt =
+ i915_pt_entry(pd, __gen8_pte_index(start, 1));
- index = idx.pte;
- max = GEN8_PTES;
+ index = __gen8_pte_index(start, 0);
page_size = I915_GTT_PAGE_SIZE;
if (!index &&
vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K &&
IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_64K) &&
(IS_ALIGNED(rem, I915_GTT_PAGE_SIZE_64K) ||
- rem >= (max - index) * I915_GTT_PAGE_SIZE))
- maybe_64K = true;
+ rem >= (I915_PDES - index) * I915_GTT_PAGE_SIZE))
+ maybe_64K = __gen8_pte_index(start, 1);
vaddr = kmap_atomic_px(pt);
}
@@ -1086,16 +1274,16 @@ static void gen8_ppgtt_insert_huge_entries(struct i915_vma *vma,
iter->dma = sg_dma_address(iter->sg);
iter->max = iter->dma + rem;
- if (maybe_64K && index < max &&
+ if (maybe_64K != -1 && index < I915_PDES &&
!(IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_64K) &&
(IS_ALIGNED(rem, I915_GTT_PAGE_SIZE_64K) ||
- rem >= (max - index) * I915_GTT_PAGE_SIZE)))
- maybe_64K = false;
+ rem >= (I915_PDES - index) * I915_GTT_PAGE_SIZE)))
+ maybe_64K = -1;
if (unlikely(!IS_ALIGNED(iter->dma, page_size)))
break;
}
- } while (rem >= page_size && index < max);
+ } while (rem >= page_size && index < I915_PDES);
kunmap_atomic(vaddr);
@@ -1105,14 +1293,14 @@ static void gen8_ppgtt_insert_huge_entries(struct i915_vma *vma,
* it and have reached the end of the sg table and we have
* enough padding.
*/
- if (maybe_64K &&
- (index == max ||
+ if (maybe_64K != -1 &&
+ (index == I915_PDES ||
(i915_vm_has_scratch_64K(vma->vm) &&
!iter->sg && IS_ALIGNED(vma->node.start +
vma->node.size,
I915_GTT_PAGE_SIZE_2M)))) {
vaddr = kmap_atomic_px(pd);
- vaddr[idx.pde] |= GEN8_PDE_IPS_64K;
+ vaddr[maybe_64K] |= GEN8_PDE_IPS_64K;
kunmap_atomic(vaddr);
page_size = I915_GTT_PAGE_SIZE_64K;
@@ -1128,9 +1316,8 @@ static void gen8_ppgtt_insert_huge_entries(struct i915_vma *vma,
if (I915_SELFTEST_ONLY(vma->vm->scrub_64K)) {
u16 i;
- encode = vma->vm->scratch_pte;
- vaddr = kmap_atomic_px(i915_pt_entry(pd,
- idx.pde));
+ encode = vma->vm->scratch[0].encode;
+ vaddr = kmap_atomic_px(i915_pt_entry(pd, maybe_64K));
for (i = 1; i < index; i += 16)
memset64(vaddr + i, encode, 15);
@@ -1143,45 +1330,35 @@ static void gen8_ppgtt_insert_huge_entries(struct i915_vma *vma,
} while (iter->sg);
}
-static void gen8_ppgtt_insert_4lvl(struct i915_address_space *vm,
- struct i915_vma *vma,
- enum i915_cache_level cache_level,
- u32 flags)
+static void gen8_ppgtt_insert(struct i915_address_space *vm,
+ struct i915_vma *vma,
+ enum i915_cache_level cache_level,
+ u32 flags)
{
- struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
+ struct i915_ppgtt * const ppgtt = i915_vm_to_ppgtt(vm);
struct sgt_dma iter = sgt_dma(vma);
- struct i915_page_directory * const pml4 = ppgtt->pd;
if (vma->page_sizes.sg > I915_GTT_PAGE_SIZE) {
- gen8_ppgtt_insert_huge_entries(vma, pml4, &iter, cache_level,
- flags);
- } else {
- struct gen8_insert_pte idx = gen8_insert_pte(vma->node.start);
+ gen8_ppgtt_insert_huge(vma, &iter, cache_level, flags);
+ } else {
+ u64 idx = vma->node.start >> GEN8_PTE_SHIFT;
- while (gen8_ppgtt_insert_pte_entries(ppgtt,
- i915_pdp_entry(pml4, idx.pml4e++),
- &iter, &idx, cache_level,
- flags))
- GEM_BUG_ON(idx.pml4e >= GEN8_PML4ES_PER_PML4);
+ do {
+ struct i915_page_directory * const pdp =
+ gen8_pdp_for_page_index(vm, idx);
- vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
- }
-}
+ idx = gen8_ppgtt_insert_pte(ppgtt, pdp, &iter, idx,
+ cache_level, flags);
+ } while (idx);
-static void gen8_free_page_tables(struct i915_address_space *vm,
- struct i915_page_directory *pd)
-{
- int i;
-
- for (i = 0; i < I915_PDES; i++) {
- if (pd->entry[i] != vm->scratch_pt)
- free_pt(vm, pd->entry[i]);
+ vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
}
}
static int gen8_init_scratch(struct i915_address_space *vm)
{
int ret;
+ int i;
/*
* If everybody agrees not to write into the scratch page,

@@ -1195,10 +1372,8 @@ static int gen8_init_scratch(struct i915_address_space *vm)
GEM_BUG_ON(!clone->has_read_only);
vm->scratch_order = clone->scratch_order;
- vm->scratch_pte = clone->scratch_pte;
- vm->scratch_pt = clone->scratch_pt;
- vm->scratch_pd = clone->scratch_pd;
- vm->scratch_pdp = clone->scratch_pdp;
+ memcpy(vm->scratch, clone->scratch, sizeof(vm->scratch));
+ px_dma(&vm->scratch[0]) = 0; /* no xfer of ownership */
return 0;
}
@@ -1206,377 +1381,88 @@ static int gen8_init_scratch(struct i915_address_space *vm)
if (ret)
return ret;
- vm->scratch_pte =
- gen8_pte_encode(vm->scratch_page.daddr,
- I915_CACHE_LLC,
- vm->has_read_only);
-
- vm->scratch_pt = alloc_pt(vm);
- if (IS_ERR(vm->scratch_pt)) {
- ret = PTR_ERR(vm->scratch_pt);
- goto free_scratch_page;
- }
+ vm->scratch[0].encode =
+ gen8_pte_encode(px_dma(&vm->scratch[0]),
+ I915_CACHE_LLC, vm->has_read_only);
- vm->scratch_pd = alloc_pd(vm);
- if (IS_ERR(vm->scratch_pd)) {
- ret = PTR_ERR(vm->scratch_pd);
- goto free_pt;
- }
+ for (i = 1; i <= vm->top; i++) {
+ if (unlikely(setup_page_dma(vm, px_base(&vm->scratch[i]))))
+ goto free_scratch;
- if (i915_vm_is_4lvl(vm)) {
- vm->scratch_pdp = alloc_pd(vm);
- if (IS_ERR(vm->scratch_pdp)) {
- ret = PTR_ERR(vm->scratch_pdp);
- goto free_pd;
- }
+ fill_px(&vm->scratch[i], vm->scratch[i - 1].encode);
+ vm->scratch[i].encode =
+ gen8_pde_encode(px_dma(&vm->scratch[i]),
+ I915_CACHE_LLC);
}
- gen8_initialize_pt(vm, vm->scratch_pt);
- init_pd_with_page(vm, vm->scratch_pd, vm->scratch_pt);
- if (i915_vm_is_4lvl(vm))
- init_pd(vm, vm->scratch_pdp, vm->scratch_pd);
-
return 0;
-free_pd:
- free_pd(vm, vm->scratch_pd);
-free_pt:
- free_pt(vm, vm->scratch_pt);
-free_scratch_page:
- cleanup_scratch_page(vm);
-
- return ret;
+free_scratch:
+ free_scratch(vm);
+ return -ENOMEM;
}
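
The loop in gen8_init_scratch() builds the scratch hierarchy bottom-up: scratch[0] is the data page, and each scratch[i] (i >= 1) is a directory page whose entries all carry scratch[i - 1].encode, so an unpopulated address resolves to the scratch page from any level. A toy model of the chain (illustrative only):

#include <stdio.h>

int main(void)
{
	int top = 3;		/* 4-level ppgtt: vm->top == 3 */
	int scratch[4];		/* entry value i means "points at scratch[i]" */
	int i, lvl;

	scratch[0] = 0;		/* the real scratch data page */
	for (i = 1; i <= top; i++)
		scratch[i] = i - 1;

	/* a lookup of any unpopulated address walks the chain down */
	for (lvl = top; lvl > 0; lvl--)
		printf("scratch[%d] -> scratch[%d]\n", lvl, scratch[lvl]);
	return 0;
}
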
-static int gen8_ppgtt_notify_vgt(struct i915_ppgtt *ppgtt, bool create)
+static int gen8_preallocate_top_level_pdp(struct i915_ppgtt *ppgtt)
{
struct i915_address_space *vm = &ppgtt->vm;
- struct drm_i915_private *dev_priv = vm->i915;
- enum vgt_g2v_type msg;
- int i;
-
- if (i915_vm_is_4lvl(vm)) {
- const u64 daddr = px_dma(ppgtt->pd);
+ struct i915_page_directory *pd = ppgtt->pd;
+ unsigned int idx;
- I915_WRITE(vgtif_reg(pdp[0].lo), lower_32_bits(daddr));
- I915_WRITE(vgtif_reg(pdp[0].hi), upper_32_bits(daddr));
+ GEM_BUG_ON(vm->top != 2);
+ GEM_BUG_ON(gen8_pd_top_count(vm) != GEN8_3LVL_PDPES);
- msg = (create ? VGT_G2V_PPGTT_L4_PAGE_TABLE_CREATE :
- VGT_G2V_PPGTT_L4_PAGE_TABLE_DESTROY);
- } else {
- for (i = 0; i < GEN8_3LVL_PDPES; i++) {
- const u64 daddr = i915_page_dir_dma_addr(ppgtt, i);
+ for (idx = 0; idx < GEN8_3LVL_PDPES; idx++) {
+ struct i915_page_directory *pde;
- I915_WRITE(vgtif_reg(pdp[i].lo), lower_32_bits(daddr));
- I915_WRITE(vgtif_reg(pdp[i].hi), upper_32_bits(daddr));
- }
+ pde = alloc_pd(vm);
+ if (IS_ERR(pde))
+ return PTR_ERR(pde);
- msg = (create ? VGT_G2V_PPGTT_L3_PAGE_TABLE_CREATE :
- VGT_G2V_PPGTT_L3_PAGE_TABLE_DESTROY);
+ fill_px(pde, vm->scratch[1].encode);
+ set_pd_entry(pd, idx, pde);
+ atomic_inc(px_used(pde)); /* keep pinned */
}
- I915_WRITE(vgtif_reg(g2v_notify), msg);
-
return 0;
}
-static void gen8_free_scratch(struct i915_address_space *vm)
-{
- if (!vm->scratch_page.daddr)
- return;
-
- if (i915_vm_is_4lvl(vm))
- free_pd(vm, vm->scratch_pdp);
- free_pd(vm, vm->scratch_pd);
- free_pt(vm, vm->scratch_pt);
- cleanup_scratch_page(vm);
-}
-
-static void gen8_ppgtt_cleanup_3lvl(struct i915_address_space *vm,
- struct i915_page_directory *pdp)
-{
- const unsigned int pdpes = i915_pdpes_per_pdp(vm);
- int i;
-
- for (i = 0; i < pdpes; i++) {
- if (pdp->entry[i] == vm->scratch_pd)
- continue;
-
- gen8_free_page_tables(vm, pdp->entry[i]);
- free_pd(vm, pdp->entry[i]);
- }
-
- free_pd(vm, pdp);
-}
-
-static void gen8_ppgtt_cleanup_4lvl(struct i915_ppgtt *ppgtt)
-{
- struct i915_page_directory * const pml4 = ppgtt->pd;
- int i;
-
- for (i = 0; i < GEN8_PML4ES_PER_PML4; i++) {
- struct i915_page_directory *pdp = i915_pdp_entry(pml4, i);
-
- if (pdp == ppgtt->vm.scratch_pdp)
- continue;
-
- gen8_ppgtt_cleanup_3lvl(&ppgtt->vm, pdp);
- }
-
- free_pd(&ppgtt->vm, pml4);
-}
-
-static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
-{
- struct drm_i915_private *i915 = vm->i915;
- struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
-
- if (intel_vgpu_active(i915))
- gen8_ppgtt_notify_vgt(ppgtt, false);
-
- if (i915_vm_is_4lvl(vm))
- gen8_ppgtt_cleanup_4lvl(ppgtt);
- else
- gen8_ppgtt_cleanup_3lvl(&ppgtt->vm, ppgtt->pd);
-
- gen8_free_scratch(vm);
-}
-
-static int gen8_ppgtt_alloc_pd(struct i915_address_space *vm,
- struct i915_page_directory *pd,
- u64 start, u64 length)
-{
- struct i915_page_table *pt, *alloc = NULL;
- u64 from = start;
- unsigned int pde;
- int ret = 0;
-
- spin_lock(&pd->lock);
- gen8_for_each_pde(pt, pd, start, length, pde) {
- const int count = gen8_pte_count(start, length);
-
- if (pt == vm->scratch_pt) {
- spin_unlock(&pd->lock);
-
- pt = fetch_and_zero(&alloc);
- if (!pt)
- pt = alloc_pt(vm);
- if (IS_ERR(pt)) {
- ret = PTR_ERR(pt);
- goto unwind;
- }
-
- if (count < GEN8_PTES || intel_vgpu_active(vm->i915))
- gen8_initialize_pt(vm, pt);
-
- spin_lock(&pd->lock);
- if (pd->entry[pde] == vm->scratch_pt) {
- gen8_ppgtt_set_pde(vm, pd, pt, pde);
- pd->entry[pde] = pt;
- atomic_inc(&pd->used);
- } else {
- alloc = pt;
- pt = pd->entry[pde];
- }
- }
-
- atomic_add(count, &pt->used);
- }
- spin_unlock(&pd->lock);
- goto out;
-
-unwind:
- gen8_ppgtt_clear_pd(vm, pd, from, start - from);
-out:
- if (alloc)
- free_pt(vm, alloc);
- return ret;
-}
-
-static int gen8_ppgtt_alloc_pdp(struct i915_address_space *vm,
- struct i915_page_directory *pdp,
- u64 start, u64 length)
-{
- struct i915_page_directory *pd, *alloc = NULL;
- u64 from = start;
- unsigned int pdpe;
- int ret = 0;
-
- spin_lock(&pdp->lock);
- gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
- if (pd == vm->scratch_pd) {
- spin_unlock(&pdp->lock);
-
- pd = fetch_and_zero(&alloc);
- if (!pd)
- pd = alloc_pd(vm);
- if (IS_ERR(pd)) {
- ret = PTR_ERR(pd);
- goto unwind;
- }
-
- init_pd_with_page(vm, pd, vm->scratch_pt);
-
- spin_lock(&pdp->lock);
- if (pdp->entry[pdpe] == vm->scratch_pd) {
- gen8_ppgtt_set_pdpe(pdp, pd, pdpe);
- pdp->entry[pdpe] = pd;
- atomic_inc(&pdp->used);
- } else {
- alloc = pd;
- pd = pdp->entry[pdpe];
- }
- }
- atomic_inc(&pd->used);
- spin_unlock(&pdp->lock);
-
- ret = gen8_ppgtt_alloc_pd(vm, pd, start, length);
- if (unlikely(ret))
- goto unwind_pd;
-
- spin_lock(&pdp->lock);
- atomic_dec(&pd->used);
- }
- spin_unlock(&pdp->lock);
- goto out;
-
-unwind_pd:
- spin_lock(&pdp->lock);
- if (atomic_dec_and_test(&pd->used)) {
- gen8_ppgtt_set_pdpe(pdp, vm->scratch_pd, pdpe);
- pdp->entry[pdpe] = vm->scratch_pd;
- GEM_BUG_ON(!atomic_read(&pdp->used));
- atomic_dec(&pdp->used);
- GEM_BUG_ON(alloc);
- alloc = pd; /* defer the free to after the lock */
- }
- spin_unlock(&pdp->lock);
-unwind:
- gen8_ppgtt_clear_pdp(vm, pdp, from, start - from);
-out:
- if (alloc)
- free_pd(vm, alloc);
- return ret;
-}
-
-static int gen8_ppgtt_alloc_3lvl(struct i915_address_space *vm,
- u64 start, u64 length)
-{
- return gen8_ppgtt_alloc_pdp(vm,
- i915_vm_to_ppgtt(vm)->pd, start, length);
-}
-
-static int gen8_ppgtt_alloc_4lvl(struct i915_address_space *vm,
- u64 start, u64 length)
+static void ppgtt_init(struct i915_ppgtt *ppgtt, struct intel_gt *gt)
{
- struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
- struct i915_page_directory * const pml4 = ppgtt->pd;
- struct i915_page_directory *pdp, *alloc = NULL;
- u64 from = start;
- int ret = 0;
- u32 pml4e;
-
- spin_lock(&pml4->lock);
- gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) {
- if (pdp == vm->scratch_pdp) {
- spin_unlock(&pml4->lock);
-
- pdp = fetch_and_zero(&alloc);
- if (!pdp)
- pdp = alloc_pd(vm);
- if (IS_ERR(pdp)) {
- ret = PTR_ERR(pdp);
- goto unwind;
- }
-
- init_pd(vm, pdp, vm->scratch_pd);
+ struct drm_i915_private *i915 = gt->i915;
- spin_lock(&pml4->lock);
- if (pml4->entry[pml4e] == vm->scratch_pdp) {
- gen8_ppgtt_set_pml4e(pml4, pdp, pml4e);
- pml4->entry[pml4e] = pdp;
- } else {
- alloc = pdp;
- pdp = pml4->entry[pml4e];
- }
- }
- atomic_inc(&pdp->used);
- spin_unlock(&pml4->lock);
-
- ret = gen8_ppgtt_alloc_pdp(vm, pdp, start, length);
- if (unlikely(ret))
- goto unwind_pdp;
+ ppgtt->vm.gt = gt;
+ ppgtt->vm.i915 = i915;
+ ppgtt->vm.dma = &i915->drm.pdev->dev;
+ ppgtt->vm.total = BIT_ULL(INTEL_INFO(i915)->ppgtt_size);
- spin_lock(&pml4->lock);
- atomic_dec(&pdp->used);
- }
- spin_unlock(&pml4->lock);
- goto out;
+ i915_address_space_init(&ppgtt->vm, VM_CLASS_PPGTT);
-unwind_pdp:
- spin_lock(&pml4->lock);
- if (atomic_dec_and_test(&pdp->used)) {
- gen8_ppgtt_set_pml4e(pml4, vm->scratch_pdp, pml4e);
- pml4->entry[pml4e] = vm->scratch_pdp;
- GEM_BUG_ON(alloc);
- alloc = pdp; /* defer the free until after the lock */
- }
- spin_unlock(&pml4->lock);
-unwind:
- gen8_ppgtt_clear_4lvl(vm, from, start - from);
-out:
- if (alloc)
- free_pd(vm, alloc);
- return ret;
+ ppgtt->vm.vma_ops.bind_vma = ppgtt_bind_vma;
+ ppgtt->vm.vma_ops.unbind_vma = ppgtt_unbind_vma;
+ ppgtt->vm.vma_ops.set_pages = ppgtt_set_pages;
+ ppgtt->vm.vma_ops.clear_pages = clear_pages;
}
-static int gen8_preallocate_top_level_pdp(struct i915_ppgtt *ppgtt)
+static struct i915_page_directory *
+gen8_alloc_top_pd(struct i915_address_space *vm)
{
- struct i915_address_space *vm = &ppgtt->vm;
- struct i915_page_directory *pdp = ppgtt->pd;
+ const unsigned int count = gen8_pd_top_count(vm);
struct i915_page_directory *pd;
- u64 start = 0, length = ppgtt->vm.total;
- u64 from = start;
- unsigned int pdpe;
-
- gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
- pd = alloc_pd(vm);
- if (IS_ERR(pd))
- goto unwind;
-
- init_pd_with_page(vm, pd, vm->scratch_pt);
- gen8_ppgtt_set_pdpe(pdp, pd, pdpe);
-
- atomic_inc(&pdp->used);
- }
- atomic_inc(&pdp->used); /* never remove */
+ GEM_BUG_ON(count > ARRAY_SIZE(pd->entry));
- return 0;
+ pd = __alloc_pd(offsetof(typeof(*pd), entry[count]));
+ if (unlikely(!pd))
+ return ERR_PTR(-ENOMEM);
-unwind:
- start -= from;
- gen8_for_each_pdpe(pd, pdp, from, start, pdpe) {
- gen8_ppgtt_set_pdpe(pdp, vm->scratch_pd, pdpe);
- free_pd(vm, pd);
+ if (unlikely(setup_page_dma(vm, px_base(pd)))) {
+ kfree(pd);
+ return ERR_PTR(-ENOMEM);
}
- atomic_set(&pdp->used, 0);
- return -ENOMEM;
-}
-static void ppgtt_init(struct drm_i915_private *i915,
- struct i915_ppgtt *ppgtt)
-{
- ppgtt->vm.i915 = i915;
- ppgtt->vm.dma = &i915->drm.pdev->dev;
- ppgtt->vm.total = BIT_ULL(INTEL_INFO(i915)->ppgtt_size);
-
- i915_address_space_init(&ppgtt->vm, VM_CLASS_PPGTT);
-
- ppgtt->vm.vma_ops.bind_vma = ppgtt_bind_vma;
- ppgtt->vm.vma_ops.unbind_vma = ppgtt_unbind_vma;
- ppgtt->vm.vma_ops.set_pages = ppgtt_set_pages;
- ppgtt->vm.vma_ops.clear_pages = clear_pages;
+ fill_page_dma(px_base(pd), vm->scratch[vm->top].encode, count);
+ atomic_inc(px_used(pd)); /* mark as pinned */
+ return pd;
}
/*
@@ -1595,7 +1481,8 @@ static struct i915_ppgtt *gen8_ppgtt_create(struct drm_i915_private *i915)
if (!ppgtt)
return ERR_PTR(-ENOMEM);
- ppgtt_init(i915, ppgtt);
+ ppgtt_init(ppgtt, &i915->gt);
+ ppgtt->vm.top = i915_vm_is_4lvl(&ppgtt->vm) ? 3 : 2;
/*
* From bdw, there is hw support for read-only pages in the PPGTT.
@@ -1615,41 +1502,24 @@ static struct i915_ppgtt *gen8_ppgtt_create(struct drm_i915_private *i915)
if (err)
goto err_free;
- ppgtt->pd = __alloc_pd();
- if (!ppgtt->pd) {
- err = -ENOMEM;
+ ppgtt->pd = gen8_alloc_top_pd(&ppgtt->vm);
+ if (IS_ERR(ppgtt->pd)) {
+ err = PTR_ERR(ppgtt->pd);
goto err_free_scratch;
}
- if (i915_vm_is_4lvl(&ppgtt->vm)) {
- err = setup_px(&ppgtt->vm, ppgtt->pd);
- if (err)
- goto err_free_pdp;
-
- init_pd(&ppgtt->vm, ppgtt->pd, ppgtt->vm.scratch_pdp);
-
- ppgtt->vm.allocate_va_range = gen8_ppgtt_alloc_4lvl;
- ppgtt->vm.insert_entries = gen8_ppgtt_insert_4lvl;
- ppgtt->vm.clear_range = gen8_ppgtt_clear_4lvl;
- } else {
- /*
- * We don't need to setup dma for top level pdp, only
- * for entries. So point entries to scratch.
- */
- memset_p(ppgtt->pd->entry, ppgtt->vm.scratch_pd,
- GEN8_3LVL_PDPES);
-
+ if (!i915_vm_is_4lvl(&ppgtt->vm)) {
if (intel_vgpu_active(i915)) {
err = gen8_preallocate_top_level_pdp(ppgtt);
if (err)
- goto err_free_pdp;
+ goto err_free_pd;
}
-
- ppgtt->vm.allocate_va_range = gen8_ppgtt_alloc_3lvl;
- ppgtt->vm.insert_entries = gen8_ppgtt_insert_3lvl;
- ppgtt->vm.clear_range = gen8_ppgtt_clear_3lvl;
}
+ ppgtt->vm.insert_entries = gen8_ppgtt_insert;
+ ppgtt->vm.allocate_va_range = gen8_ppgtt_alloc;
+ ppgtt->vm.clear_range = gen8_ppgtt_clear;
+
if (intel_vgpu_active(i915))
gen8_ppgtt_notify_vgt(ppgtt, true);
@@ -1657,10 +1527,11 @@ static struct i915_ppgtt *gen8_ppgtt_create(struct drm_i915_private *i915)
return ppgtt;
-err_free_pdp:
- free_pd(&ppgtt->vm, ppgtt->pd);
+err_free_pd:
+ __gen8_ppgtt_cleanup(&ppgtt->vm, ppgtt->pd,
+ gen8_pd_top_count(&ppgtt->vm), ppgtt->vm.top);
err_free_scratch:
- gen8_free_scratch(&ppgtt->vm);
+ free_scratch(&ppgtt->vm);
err_free:
kfree(ppgtt);
return ERR_PTR(err);
@@ -1676,25 +1547,26 @@ static inline void gen6_write_pde(const struct gen6_ppgtt *ppgtt,
ppgtt->pd_addr + pde);
}
-static void gen7_ppgtt_enable(struct drm_i915_private *dev_priv)
+static void gen7_ppgtt_enable(struct intel_gt *gt)
{
+ struct drm_i915_private *i915 = gt->i915;
+ struct intel_uncore *uncore = gt->uncore;
struct intel_engine_cs *engine;
- u32 ecochk, ecobits;
enum intel_engine_id id;
+ u32 ecochk;
- ecobits = I915_READ(GAC_ECO_BITS);
- I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_PPGTT_CACHE64B);
+ intel_uncore_rmw(uncore, GAC_ECO_BITS, 0, ECOBITS_PPGTT_CACHE64B);
- ecochk = I915_READ(GAM_ECOCHK);
- if (IS_HASWELL(dev_priv)) {
+ ecochk = intel_uncore_read(uncore, GAM_ECOCHK);
+ if (IS_HASWELL(i915)) {
ecochk |= ECOCHK_PPGTT_WB_HSW;
} else {
ecochk |= ECOCHK_PPGTT_LLC_IVB;
ecochk &= ~ECOCHK_PPGTT_GFDT_IVB;
}
- I915_WRITE(GAM_ECOCHK, ecochk);
+ intel_uncore_write(uncore, GAM_ECOCHK, ecochk);
- for_each_engine(engine, dev_priv, id) {
+ for_each_engine(engine, i915, id) {
/* GFX_MODE is per-ring on gen7+ */
ENGINE_WRITE(engine,
RING_MODE_GEN7,
@@ -1702,22 +1574,29 @@ static void gen7_ppgtt_enable(struct drm_i915_private *dev_priv)
}
}
-static void gen6_ppgtt_enable(struct drm_i915_private *dev_priv)
+static void gen6_ppgtt_enable(struct intel_gt *gt)
{
- u32 ecochk, gab_ctl, ecobits;
+ struct intel_uncore *uncore = gt->uncore;
- ecobits = I915_READ(GAC_ECO_BITS);
- I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_SNB_BIT |
- ECOBITS_PPGTT_CACHE64B);
+ intel_uncore_rmw(uncore,
+ GAC_ECO_BITS,
+ 0,
+ ECOBITS_SNB_BIT | ECOBITS_PPGTT_CACHE64B);
- gab_ctl = I915_READ(GAB_CTL);
- I915_WRITE(GAB_CTL, gab_ctl | GAB_CTL_CONT_AFTER_PAGEFAULT);
+ intel_uncore_rmw(uncore,
+ GAB_CTL,
+ 0,
+ GAB_CTL_CONT_AFTER_PAGEFAULT);
- ecochk = I915_READ(GAM_ECOCHK);
- I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT | ECOCHK_PPGTT_CACHE64B);
+ intel_uncore_rmw(uncore,
+ GAM_ECOCHK,
+ 0,
+ ECOCHK_SNB_BIT | ECOCHK_PPGTT_CACHE64B);
- if (HAS_PPGTT(dev_priv)) /* may be disabled for VT-d */
- I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
+ if (HAS_PPGTT(uncore->i915)) /* may be disabled for VT-d */
+ intel_uncore_write(uncore,
+ GFX_MODE,
+ _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
}
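
The intel_uncore_rmw() conversions above fold the old read/modify/write register pairs into one helper; its effect on the register value, given (clear, set) arguments, reduces to the following sketch:

#include <stdio.h>

/* value-level semantics of intel_uncore_rmw(uncore, reg, clear, set) */
static unsigned int rmw(unsigned int old, unsigned int clear, unsigned int set)
{
	return (old & ~clear) | set;
}

int main(void)
{
	/* clear == 0, as in the GAC_ECO_BITS update: a plain OR-in */
	printf("%#x\n", rmw(0x1000, 0, 0x3));	/* 0x1003 */
	return 0;
}
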
/* PPGTT support for Sandybridge/Gen6 and later */
@@ -1726,7 +1605,7 @@ static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
{
struct gen6_ppgtt * const ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm));
const unsigned int first_entry = start / I915_GTT_PAGE_SIZE;
- const gen6_pte_t scratch_pte = vm->scratch_pte;
+ const gen6_pte_t scratch_pte = vm->scratch[0].encode;
unsigned int pde = first_entry / GEN6_PTES;
unsigned int pte = first_entry % GEN6_PTES;
unsigned int num_entries = length / I915_GTT_PAGE_SIZE;
@@ -1737,7 +1616,7 @@ static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
const unsigned int count = min(num_entries, GEN6_PTES - pte);
gen6_pte_t *vaddr;
- GEM_BUG_ON(pt == vm->scratch_pt);
+ GEM_BUG_ON(px_base(pt) == px_base(&vm->scratch[1]));
num_entries -= count;
@@ -1774,7 +1653,7 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
struct sgt_dma iter = sgt_dma(vma);
gen6_pte_t *vaddr;
- GEM_BUG_ON(i915_pt_entry(pd, act_pt) == vm->scratch_pt);
+ GEM_BUG_ON(pd->entry[act_pt] == &vm->scratch[1]);
vaddr = kmap_atomic_px(i915_pt_entry(pd, act_pt));
do {
@@ -1819,7 +1698,7 @@ static int gen6_alloc_va_range(struct i915_address_space *vm,
gen6_for_each_pde(pt, pd, start, length, pde) {
const unsigned int count = gen6_pte_count(start, length);
- if (pt == vm->scratch_pt) {
+ if (px_base(pt) == px_base(&vm->scratch[1])) {
spin_unlock(&pd->lock);
pt = fetch_and_zero(&alloc);
@@ -1830,10 +1709,10 @@ static int gen6_alloc_va_range(struct i915_address_space *vm,
goto unwind_out;
}
- gen6_initialize_pt(vm, pt);
+ fill32_px(pt, vm->scratch[0].encode);
spin_lock(&pd->lock);
- if (pd->entry[pde] == vm->scratch_pt) {
+ if (pd->entry[pde] == &vm->scratch[1]) {
pd->entry[pde] = pt;
if (i915_vma_is_bound(ppgtt->vma,
I915_VMA_GLOBAL_BIND)) {
@@ -1852,7 +1731,7 @@ static int gen6_alloc_va_range(struct i915_address_space *vm,
if (flush) {
mark_tlbs_dirty(&ppgtt->base);
- gen6_ggtt_invalidate(vm->i915);
+ gen6_ggtt_invalidate(vm->gt->ggtt);
}
goto out;
@@ -1861,7 +1740,7 @@ unwind_out:
gen6_ppgtt_clear_range(vm, from, start - from);
out:
if (alloc)
- free_pt(vm, alloc);
+ free_px(vm, alloc);
intel_runtime_pm_put(&vm->i915->runtime_pm, wakeref);
return ret;
}
@@ -1870,108 +1749,52 @@ static int gen6_ppgtt_init_scratch(struct gen6_ppgtt *ppgtt)
{
struct i915_address_space * const vm = &ppgtt->base.vm;
struct i915_page_directory * const pd = ppgtt->base.pd;
- struct i915_page_table *unused;
- u32 pde;
int ret;
ret = setup_scratch_page(vm, __GFP_HIGHMEM);
if (ret)
return ret;
- vm->scratch_pte = vm->pte_encode(vm->scratch_page.daddr,
- I915_CACHE_NONE,
- PTE_READ_ONLY);
+ vm->scratch[0].encode =
+ vm->pte_encode(px_dma(&vm->scratch[0]),
+ I915_CACHE_NONE, PTE_READ_ONLY);
- vm->scratch_pt = alloc_pt(vm);
- if (IS_ERR(vm->scratch_pt)) {
+ if (unlikely(setup_page_dma(vm, px_base(&vm->scratch[1])))) {
cleanup_scratch_page(vm);
- return PTR_ERR(vm->scratch_pt);
+ return -ENOMEM;
}
- gen6_initialize_pt(vm, vm->scratch_pt);
-
- gen6_for_all_pdes(unused, pd, pde)
- pd->entry[pde] = vm->scratch_pt;
+ fill32_px(&vm->scratch[1], vm->scratch[0].encode);
+ memset_p(pd->entry, &vm->scratch[1], I915_PDES);
return 0;
}
-static void gen6_ppgtt_free_scratch(struct i915_address_space *vm)
-{
- free_pt(vm, vm->scratch_pt);
- cleanup_scratch_page(vm);
-}
-
static void gen6_ppgtt_free_pd(struct gen6_ppgtt *ppgtt)
{
struct i915_page_directory * const pd = ppgtt->base.pd;
+ struct i915_page_dma * const scratch =
+ px_base(&ppgtt->base.vm.scratch[1]);
struct i915_page_table *pt;
u32 pde;
gen6_for_all_pdes(pt, pd, pde)
- if (pt != ppgtt->base.vm.scratch_pt)
- free_pt(&ppgtt->base.vm, pt);
-}
-
-struct gen6_ppgtt_cleanup_work {
- struct work_struct base;
- struct i915_vma *vma;
-};
-
-static void gen6_ppgtt_cleanup_work(struct work_struct *wrk)
-{
- struct gen6_ppgtt_cleanup_work *work =
- container_of(wrk, typeof(*work), base);
- /* Side note, vma->vm is the GGTT not the ppgtt we just destroyed! */
- struct drm_i915_private *i915 = work->vma->vm->i915;
-
- mutex_lock(&i915->drm.struct_mutex);
- i915_vma_destroy(work->vma);
- mutex_unlock(&i915->drm.struct_mutex);
-
- kfree(work);
-}
-
-static int nop_set_pages(struct i915_vma *vma)
-{
- return -ENODEV;
-}
-
-static void nop_clear_pages(struct i915_vma *vma)
-{
-}
-
-static int nop_bind(struct i915_vma *vma,
- enum i915_cache_level cache_level,
- u32 unused)
-{
- return -ENODEV;
+ if (px_base(pt) != scratch)
+ free_px(&ppgtt->base.vm, pt);
}
-static void nop_unbind(struct i915_vma *vma)
-{
-}
-
-static const struct i915_vma_ops nop_vma_ops = {
- .set_pages = nop_set_pages,
- .clear_pages = nop_clear_pages,
- .bind_vma = nop_bind,
- .unbind_vma = nop_unbind,
-};
-
static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
{
struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm));
- struct gen6_ppgtt_cleanup_work *work = ppgtt->work;
+ struct drm_i915_private *i915 = vm->i915;
/* FIXME remove the struct_mutex to bring the locking under control */
- INIT_WORK(&work->base, gen6_ppgtt_cleanup_work);
- work->vma = ppgtt->vma;
- work->vma->ops = &nop_vma_ops;
- schedule_work(&work->base);
+ mutex_lock(&i915->drm.struct_mutex);
+ i915_vma_destroy(ppgtt->vma);
+ mutex_unlock(&i915->drm.struct_mutex);
gen6_ppgtt_free_pd(ppgtt);
- gen6_ppgtt_free_scratch(vm);
+ free_scratch(vm);
kfree(ppgtt->base.pd);
}
@@ -1998,14 +1821,14 @@ static int pd_vma_bind(struct i915_vma *vma,
struct i915_page_table *pt;
unsigned int pde;
- ppgtt->base.pd->base.ggtt_offset = ggtt_offset * sizeof(gen6_pte_t);
+ px_base(ppgtt->base.pd)->ggtt_offset = ggtt_offset * sizeof(gen6_pte_t);
ppgtt->pd_addr = (gen6_pte_t __iomem *)ggtt->gsm + ggtt_offset;
gen6_for_all_pdes(pt, ppgtt->base.pd, pde)
gen6_write_pde(ppgtt, pde, pt);
mark_tlbs_dirty(&ppgtt->base);
- gen6_ggtt_invalidate(ppgtt->base.vm.i915);
+ gen6_ggtt_invalidate(ggtt);
return 0;
}
@@ -2014,7 +1837,8 @@ static void pd_vma_unbind(struct i915_vma *vma)
{
struct gen6_ppgtt *ppgtt = vma->private;
struct i915_page_directory * const pd = ppgtt->base.pd;
- struct i915_page_table * const scratch_pt = ppgtt->base.vm.scratch_pt;
+ struct i915_page_dma * const scratch =
+ px_base(&ppgtt->base.vm.scratch[1]);
struct i915_page_table *pt;
unsigned int pde;
@@ -2023,11 +1847,11 @@ static void pd_vma_unbind(struct i915_vma *vma)
/* Free all no longer used page tables */
gen6_for_all_pdes(pt, ppgtt->base.pd, pde) {
- if (atomic_read(&pt->used) || pt == scratch_pt)
+ if (px_base(pt) == scratch || atomic_read(&pt->used))
continue;
- free_pt(&ppgtt->base.vm, pt);
- pd->entry[pde] = scratch_pt;
+ free_px(&ppgtt->base.vm, pt);
+ pd->entry[pde] = scratch;
}
ppgtt->scan_for_unused_pt = false;
@@ -2043,7 +1867,7 @@ static const struct i915_vma_ops pd_vma_ops = {
static struct i915_vma *pd_vma_create(struct gen6_ppgtt *ppgtt, int size)
{
struct drm_i915_private *i915 = ppgtt->base.vm.i915;
- struct i915_ggtt *ggtt = &i915->ggtt;
+ struct i915_ggtt *ggtt = ppgtt->base.vm.gt->ggtt;
struct i915_vma *vma;
GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
@@ -2053,8 +1877,7 @@ static struct i915_vma *pd_vma_create(struct gen6_ppgtt *ppgtt, int size)
if (!vma)
return ERR_PTR(-ENOMEM);
- i915_active_init(i915, &vma->active, NULL);
- INIT_ACTIVE_REQUEST(&vma->last_fence);
+ i915_active_init(i915, &vma->active, NULL, NULL);
vma->vm = &ggtt->vm;
vma->ops = &pd_vma_ops;
@@ -2141,7 +1964,8 @@ static struct i915_ppgtt *gen6_ppgtt_create(struct drm_i915_private *i915)
if (!ppgtt)
return ERR_PTR(-ENOMEM);
- ppgtt_init(i915, &ppgtt->base);
+ ppgtt_init(&ppgtt->base, &i915->gt);
+ ppgtt->base.vm.top = 1;
ppgtt->base.vm.allocate_va_range = gen6_alloc_va_range;
ppgtt->base.vm.clear_range = gen6_ppgtt_clear_range;
@@ -2150,16 +1974,10 @@ static struct i915_ppgtt *gen6_ppgtt_create(struct drm_i915_private *i915)
ppgtt->base.vm.pte_encode = ggtt->vm.pte_encode;
- ppgtt->work = kmalloc(sizeof(*ppgtt->work), GFP_KERNEL);
- if (!ppgtt->work) {
- err = -ENOMEM;
- goto err_free;
- }
-
- ppgtt->base.pd = __alloc_pd();
+ ppgtt->base.pd = __alloc_pd(sizeof(*ppgtt->base.pd));
if (!ppgtt->base.pd) {
err = -ENOMEM;
- goto err_work;
+ goto err_free;
}
err = gen6_ppgtt_init_scratch(ppgtt);
@@ -2175,31 +1993,40 @@ static struct i915_ppgtt *gen6_ppgtt_create(struct drm_i915_private *i915)
return &ppgtt->base;
err_scratch:
- gen6_ppgtt_free_scratch(&ppgtt->base.vm);
+ free_scratch(&ppgtt->base.vm);
err_pd:
kfree(ppgtt->base.pd);
-err_work:
- kfree(ppgtt->work);
err_free:
kfree(ppgtt);
return ERR_PTR(err);
}
-static void gtt_write_workarounds(struct drm_i915_private *dev_priv)
+static void gtt_write_workarounds(struct intel_gt *gt)
{
+ struct drm_i915_private *i915 = gt->i915;
+ struct intel_uncore *uncore = gt->uncore;
+
/* This function is for gtt-related workarounds. It is called on driver
 * load and after a GPU reset, so you can place workarounds here even if
 * they get overwritten by a GPU reset.
*/
/* WaIncreaseDefaultTLBEntries:chv,bdw,skl,bxt,kbl,glk,cfl,cnl,icl */
- if (IS_BROADWELL(dev_priv))
- I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_BDW);
- else if (IS_CHERRYVIEW(dev_priv))
- I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_CHV);
- else if (IS_GEN9_LP(dev_priv))
- I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_BXT);
- else if (INTEL_GEN(dev_priv) >= 9)
- I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_SKL);
+ if (IS_BROADWELL(i915))
+ intel_uncore_write(uncore,
+ GEN8_L3_LRA_1_GPGPU,
+ GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_BDW);
+ else if (IS_CHERRYVIEW(i915))
+ intel_uncore_write(uncore,
+ GEN8_L3_LRA_1_GPGPU,
+ GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_CHV);
+ else if (IS_GEN9_LP(i915))
+ intel_uncore_write(uncore,
+ GEN8_L3_LRA_1_GPGPU,
+ GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_BXT);
+ else if (INTEL_GEN(i915) >= 9)
+ intel_uncore_write(uncore,
+ GEN8_L3_LRA_1_GPGPU,
+ GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_SKL);
/*
* To support 64K PTEs we need to first enable the use of the
@@ -2212,21 +2039,45 @@ static void gtt_write_workarounds(struct drm_i915_private *dev_priv)
* 32K pages, but we don't currently have any support for it in our
* driver.
*/
- if (HAS_PAGE_SIZES(dev_priv, I915_GTT_PAGE_SIZE_64K) &&
- INTEL_GEN(dev_priv) <= 10)
- I915_WRITE(GEN8_GAMW_ECO_DEV_RW_IA,
- I915_READ(GEN8_GAMW_ECO_DEV_RW_IA) |
- GAMW_ECO_ENABLE_64K_IPS_FIELD);
+ if (HAS_PAGE_SIZES(i915, I915_GTT_PAGE_SIZE_64K) &&
+ INTEL_GEN(i915) <= 10)
+ intel_uncore_rmw(uncore,
+ GEN8_GAMW_ECO_DEV_RW_IA,
+ 0,
+ GAMW_ECO_ENABLE_64K_IPS_FIELD);
+
+ if (IS_GEN_RANGE(i915, 8, 11)) {
+ bool can_use_gtt_cache = true;
+
+ /*
+ * According to the BSpec, if we use 2M/1G pages then we also
+ * need to disable the GTT cache. At least on BDW, we can see
+ * visual corruption when using 2M pages without disabling the
+ * GTT cache.
+ */
+ if (HAS_PAGE_SIZES(i915, I915_GTT_PAGE_SIZE_2M))
+ can_use_gtt_cache = false;
+
+ /* WaGttCachingOffByDefault */
+ intel_uncore_write(uncore,
+ HSW_GTT_CACHE_EN,
+ can_use_gtt_cache ? GTT_CACHE_EN_ALL : 0);
+ WARN_ON_ONCE(can_use_gtt_cache &&
+ intel_uncore_read(uncore,
+ HSW_GTT_CACHE_EN) == 0);
+ }
}
-int i915_ppgtt_init_hw(struct drm_i915_private *dev_priv)
+int i915_ppgtt_init_hw(struct intel_gt *gt)
{
- gtt_write_workarounds(dev_priv);
+ struct drm_i915_private *i915 = gt->i915;
- if (IS_GEN(dev_priv, 6))
- gen6_ppgtt_enable(dev_priv);
- else if (IS_GEN(dev_priv, 7))
- gen7_ppgtt_enable(dev_priv);
+ gtt_write_workarounds(gt);
+
+ if (IS_GEN(i915, 6))
+ gen6_ppgtt_enable(gt);
+ else if (IS_GEN(i915, 7))
+ gen7_ppgtt_enable(gt);
return 0;
}
@@ -2254,42 +2105,6 @@ i915_ppgtt_create(struct drm_i915_private *i915)
return ppgtt;
}
-static void ppgtt_destroy_vma(struct i915_address_space *vm)
-{
- struct list_head *phases[] = {
- &vm->bound_list,
- &vm->unbound_list,
- NULL,
- }, **phase;
-
- vm->closed = true;
- for (phase = phases; *phase; phase++) {
- struct i915_vma *vma, *vn;
-
- list_for_each_entry_safe(vma, vn, *phase, vm_link)
- i915_vma_destroy(vma);
- }
-}
-
-void i915_vm_release(struct kref *kref)
-{
- struct i915_address_space *vm =
- container_of(kref, struct i915_address_space, ref);
-
- GEM_BUG_ON(i915_is_ggtt(vm));
- trace_i915_ppgtt_release(vm);
-
- ppgtt_destroy_vma(vm);
-
- GEM_BUG_ON(!list_empty(&vm->bound_list));
- GEM_BUG_ON(!list_empty(&vm->unbound_list));
-
- vm->cleanup(vm);
- i915_address_space_fini(vm);
-
- kfree(vm);
-}
-
/* Certain Gen5 chipsets require idling the GPU before
* unmapping anything from the GTT when VT-d is enabled.
*/
@@ -2301,21 +2116,26 @@ static bool needs_idle_maps(struct drm_i915_private *dev_priv)
return IS_GEN(dev_priv, 5) && IS_MOBILE(dev_priv) && intel_vtd_active();
}
-void i915_gem_suspend_gtt_mappings(struct drm_i915_private *dev_priv)
+static void ggtt_suspend_mappings(struct i915_ggtt *ggtt)
{
- struct i915_ggtt *ggtt = &dev_priv->ggtt;
+ struct drm_i915_private *i915 = ggtt->vm.i915;
/* Don't bother messing with faults pre GEN6 as we have little
* documentation supporting that it's a good idea.
*/
- if (INTEL_GEN(dev_priv) < 6)
+ if (INTEL_GEN(i915) < 6)
return;
- i915_check_and_clear_faults(dev_priv);
+ intel_gt_check_and_clear_faults(ggtt->vm.gt);
ggtt->vm.clear_range(&ggtt->vm, 0, ggtt->vm.total);
- i915_ggtt_invalidate(dev_priv);
+ ggtt->invalidate(ggtt);
+}
+
+void i915_gem_suspend_gtt_mappings(struct drm_i915_private *i915)
+{
+ ggtt_suspend_mappings(&i915->ggtt);
}
int i915_gem_gtt_prepare_pages(struct drm_i915_gem_object *obj,
@@ -2361,7 +2181,7 @@ static void gen8_ggtt_insert_page(struct i915_address_space *vm,
gen8_set_pte(pte, gen8_pte_encode(addr, level, 0));
- ggtt->invalidate(vm->i915);
+ ggtt->invalidate(ggtt);
}
static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
@@ -2389,7 +2209,7 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
* We want to flush the TLBs only after we're certain all the PTE
* updates have finished.
*/
- ggtt->invalidate(vm->i915);
+ ggtt->invalidate(ggtt);
}
static void gen6_ggtt_insert_page(struct i915_address_space *vm,
@@ -2404,7 +2224,7 @@ static void gen6_ggtt_insert_page(struct i915_address_space *vm,
iowrite32(vm->pte_encode(addr, level, flags), pte);
- ggtt->invalidate(vm->i915);
+ ggtt->invalidate(ggtt);
}
/*
@@ -2430,7 +2250,7 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
* We want to flush the TLBs only after we're certain all the PTE
* updates have finished.
*/
- ggtt->invalidate(vm->i915);
+ ggtt->invalidate(ggtt);
}
static void nop_clear_range(struct i915_address_space *vm,
@@ -2444,7 +2264,7 @@ static void gen8_ggtt_clear_range(struct i915_address_space *vm,
struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
unsigned first_entry = start / I915_GTT_PAGE_SIZE;
unsigned num_entries = length / I915_GTT_PAGE_SIZE;
- const gen8_pte_t scratch_pte = vm->scratch_pte;
+ const gen8_pte_t scratch_pte = vm->scratch[0].encode;
gen8_pte_t __iomem *gtt_base =
(gen8_pte_t __iomem *)ggtt->gsm + first_entry;
const int max_entries = ggtt_total_entries(ggtt) - first_entry;
@@ -2569,8 +2389,7 @@ static void gen6_ggtt_clear_range(struct i915_address_space *vm,
first_entry, num_entries, max_entries))
num_entries = max_entries;
- scratch_pte = vm->scratch_pte;
-
+ scratch_pte = vm->scratch[0].encode;
for (i = 0; i < num_entries; i++)
iowrite32(scratch_pte, &gtt_base[i]);
}
@@ -2657,18 +2476,18 @@ static int aliasing_gtt_bind_vma(struct i915_vma *vma,
pte_flags |= PTE_READ_ONLY;
if (flags & I915_VMA_LOCAL_BIND) {
- struct i915_ppgtt *appgtt = i915->mm.aliasing_ppgtt;
+ struct i915_ppgtt *alias = i915_vm_to_ggtt(vma->vm)->alias;
if (!(vma->flags & I915_VMA_LOCAL_BIND)) {
- ret = appgtt->vm.allocate_va_range(&appgtt->vm,
- vma->node.start,
- vma->size);
+ ret = alias->vm.allocate_va_range(&alias->vm,
+ vma->node.start,
+ vma->size);
if (ret)
return ret;
}
- appgtt->vm.insert_entries(&appgtt->vm, vma, cache_level,
- pte_flags);
+ alias->vm.insert_entries(&alias->vm, vma,
+ cache_level, pte_flags);
}
if (flags & I915_VMA_GLOBAL_BIND) {
@@ -2696,7 +2515,8 @@ static void aliasing_gtt_unbind_vma(struct i915_vma *vma)
}
if (vma->flags & I915_VMA_LOCAL_BIND) {
- struct i915_address_space *vm = &i915->mm.aliasing_ppgtt->vm;
+ struct i915_address_space *vm =
+ &i915_vm_to_ggtt(vma->vm)->alias->vm;
vm->clear_range(vm, vma->node.start, vma->size);
}
@@ -2753,13 +2573,12 @@ static void i915_gtt_color_adjust(const struct drm_mm_node *node,
*end -= I915_GTT_PAGE_SIZE;
}
-static int init_aliasing_ppgtt(struct drm_i915_private *i915)
+static int init_aliasing_ppgtt(struct i915_ggtt *ggtt)
{
- struct i915_ggtt *ggtt = &i915->ggtt;
struct i915_ppgtt *ppgtt;
int err;
- ppgtt = i915_ppgtt_create(i915);
+ ppgtt = i915_ppgtt_create(ggtt->vm.i915);
if (IS_ERR(ppgtt))
return PTR_ERR(ppgtt);
@@ -2778,7 +2597,7 @@ static int init_aliasing_ppgtt(struct drm_i915_private *i915)
if (err)
goto err_ppgtt;
- i915->mm.aliasing_ppgtt = ppgtt;
+ ggtt->alias = ppgtt;
GEM_BUG_ON(ggtt->vm.vma_ops.bind_vma != ggtt_bind_vma);
ggtt->vm.vma_ops.bind_vma = aliasing_gtt_bind_vma;
@@ -2793,19 +2612,24 @@ err_ppgtt:
return err;
}
-static void fini_aliasing_ppgtt(struct drm_i915_private *i915)
+static void fini_aliasing_ppgtt(struct i915_ggtt *ggtt)
{
- struct i915_ggtt *ggtt = &i915->ggtt;
+ struct drm_i915_private *i915 = ggtt->vm.i915;
struct i915_ppgtt *ppgtt;
- ppgtt = fetch_and_zero(&i915->mm.aliasing_ppgtt);
+ mutex_lock(&i915->drm.struct_mutex);
+
+ ppgtt = fetch_and_zero(&ggtt->alias);
if (!ppgtt)
- return;
+ goto out;
i915_vm_put(&ppgtt->vm);
ggtt->vm.vma_ops.bind_vma = ggtt_bind_vma;
ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma;
+
+out:
+ mutex_unlock(&i915->drm.struct_mutex);
}
static int ggtt_reserve_guc_top(struct i915_ggtt *ggtt)
@@ -2834,7 +2658,13 @@ static void ggtt_release_guc_top(struct i915_ggtt *ggtt)
drm_mm_remove_node(&ggtt->uc_fw);
}
-int i915_gem_init_ggtt(struct drm_i915_private *dev_priv)
+static void cleanup_init_ggtt(struct i915_ggtt *ggtt)
+{
+ ggtt_release_guc_top(ggtt);
+ drm_mm_remove_node(&ggtt->error_capture);
+}
+
+static int init_ggtt(struct i915_ggtt *ggtt)
{
/* Let GEM manage all of the aperture.
*
@@ -2845,7 +2675,6 @@ int i915_gem_init_ggtt(struct drm_i915_private *dev_priv)
* aperture. One page should be enough to keep any prefetching inside
* of the aperture.
*/
- struct i915_ggtt *ggtt = &dev_priv->ggtt;
unsigned long hole_start, hole_end;
struct drm_mm_node *entry;
int ret;
@@ -2857,9 +2686,9 @@ int i915_gem_init_ggtt(struct drm_i915_private *dev_priv)
* why.
*/
ggtt->pin_bias = max_t(u32, I915_GTT_PAGE_SIZE,
- intel_wopcm_guc_size(&dev_priv->wopcm));
+ intel_wopcm_guc_size(&ggtt->vm.i915->wopcm));
- ret = intel_vgt_balloon(dev_priv);
+ ret = intel_vgt_balloon(ggtt);
if (ret)
return ret;
@@ -2878,7 +2707,7 @@ int i915_gem_init_ggtt(struct drm_i915_private *dev_priv)
*/
ret = ggtt_reserve_guc_top(ggtt);
if (ret)
- goto err_reserve;
+ goto err;
/* Clear any non-preallocated blocks */
drm_mm_for_each_hole(entry, &ggtt->vm.mm, hole_start, hole_end) {
@@ -2891,35 +2720,41 @@ int i915_gem_init_ggtt(struct drm_i915_private *dev_priv)
/* And finally clear the reserved guard page */
ggtt->vm.clear_range(&ggtt->vm, ggtt->vm.total - PAGE_SIZE, PAGE_SIZE);
- if (INTEL_PPGTT(dev_priv) == INTEL_PPGTT_ALIASING) {
- ret = init_aliasing_ppgtt(dev_priv);
+ return 0;
+
+err:
+ cleanup_init_ggtt(ggtt);
+ return ret;
+}
+
+int i915_init_ggtt(struct drm_i915_private *i915)
+{
+ int ret;
+
+ ret = init_ggtt(&i915->ggtt);
+ if (ret)
+ return ret;
+
+ if (INTEL_PPGTT(i915) == INTEL_PPGTT_ALIASING) {
+ ret = init_aliasing_ppgtt(&i915->ggtt);
if (ret)
- goto err_appgtt;
+ cleanup_init_ggtt(&i915->ggtt);
}
return 0;
-
-err_appgtt:
- ggtt_release_guc_top(ggtt);
-err_reserve:
- drm_mm_remove_node(&ggtt->error_capture);
- return ret;
}
-/**
- * i915_ggtt_cleanup_hw - Clean up GGTT hardware initialization
- * @dev_priv: i915 device
- */
-void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv)
+static void ggtt_cleanup_hw(struct i915_ggtt *ggtt)
{
- struct i915_ggtt *ggtt = &dev_priv->ggtt;
+ struct drm_i915_private *i915 = ggtt->vm.i915;
struct i915_vma *vma, *vn;
- struct pagevec *pvec;
ggtt->vm.closed = true;
- mutex_lock(&dev_priv->drm.struct_mutex);
- fini_aliasing_ppgtt(dev_priv);
+ rcu_barrier(); /* flush the RCU'ed __i915_vm_release */
+ flush_workqueue(i915->wq);
+
+ mutex_lock(&i915->drm.struct_mutex);
list_for_each_entry_safe(vma, vn, &ggtt->vm.bound_list, vm_link)
WARN_ON(i915_vma_unbind(vma));
@@ -2930,24 +2765,37 @@ void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv)
ggtt_release_guc_top(ggtt);
if (drm_mm_initialized(&ggtt->vm.mm)) {
- intel_vgt_deballoon(dev_priv);
+ intel_vgt_deballoon(ggtt);
i915_address_space_fini(&ggtt->vm);
}
ggtt->vm.cleanup(&ggtt->vm);
- pvec = &dev_priv->mm.wc_stash.pvec;
+ mutex_unlock(&i915->drm.struct_mutex);
+
+ arch_phys_wc_del(ggtt->mtrr);
+ io_mapping_fini(&ggtt->iomap);
+}
+
+/**
+ * i915_ggtt_driver_release - Clean up GGTT hardware initialization
+ * @i915: i915 device
+ */
+void i915_ggtt_driver_release(struct drm_i915_private *i915)
+{
+ struct pagevec *pvec;
+
+ fini_aliasing_ppgtt(&i915->ggtt);
+
+ ggtt_cleanup_hw(&i915->ggtt);
+
+ pvec = &i915->mm.wc_stash.pvec;
if (pvec->nr) {
set_pages_array_wb(pvec->pages, pvec->nr);
__pagevec_release(pvec);
}
- mutex_unlock(&dev_priv->drm.struct_mutex);
-
- arch_phys_wc_del(ggtt->mtrr);
- io_mapping_fini(&ggtt->iomap);
-
- i915_gem_cleanup_stolen(dev_priv);
+ i915_gem_cleanup_stolen(i915);
}
static unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
@@ -3018,243 +2866,61 @@ static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
return ret;
}
- ggtt->vm.scratch_pte =
- ggtt->vm.pte_encode(ggtt->vm.scratch_page.daddr,
+ ggtt->vm.scratch[0].encode =
+ ggtt->vm.pte_encode(px_dma(&ggtt->vm.scratch[0]),
I915_CACHE_NONE, 0);
return 0;
}
-static struct intel_ppat_entry *
-__alloc_ppat_entry(struct intel_ppat *ppat, unsigned int index, u8 value)
-{
- struct intel_ppat_entry *entry = &ppat->entries[index];
-
- GEM_BUG_ON(index >= ppat->max_entries);
- GEM_BUG_ON(test_bit(index, ppat->used));
-
- entry->ppat = ppat;
- entry->value = value;
- kref_init(&entry->ref);
- set_bit(index, ppat->used);
- set_bit(index, ppat->dirty);
-
- return entry;
-}
-
-static void __free_ppat_entry(struct intel_ppat_entry *entry)
+static void tgl_setup_private_ppat(struct drm_i915_private *dev_priv)
{
- struct intel_ppat *ppat = entry->ppat;
- unsigned int index = entry - ppat->entries;
-
- GEM_BUG_ON(index >= ppat->max_entries);
- GEM_BUG_ON(!test_bit(index, ppat->used));
-
- entry->value = ppat->clear_value;
- clear_bit(index, ppat->used);
- set_bit(index, ppat->dirty);
-}
-
-/**
- * intel_ppat_get - get a usable PPAT entry
- * @i915: i915 device instance
- * @value: the PPAT value required by the caller
- *
- * The function tries to search if there is an existing PPAT entry which
- * matches with the required value. If perfectly matched, the existing PPAT
- * entry will be used. If only partially matched, it will try to check if
- * there is any available PPAT index. If yes, it will allocate a new PPAT
- * index for the required entry and update the HW. If not, the partially
- * matched entry will be used.
- */
-const struct intel_ppat_entry *
-intel_ppat_get(struct drm_i915_private *i915, u8 value)
-{
- struct intel_ppat *ppat = &i915->ppat;
- struct intel_ppat_entry *entry = NULL;
- unsigned int scanned, best_score;
- int i;
-
- GEM_BUG_ON(!ppat->max_entries);
-
- scanned = best_score = 0;
- for_each_set_bit(i, ppat->used, ppat->max_entries) {
- unsigned int score;
-
- score = ppat->match(ppat->entries[i].value, value);
- if (score > best_score) {
- entry = &ppat->entries[i];
- if (score == INTEL_PPAT_PERFECT_MATCH) {
- kref_get(&entry->ref);
- return entry;
- }
- best_score = score;
- }
- scanned++;
- }
-
- if (scanned == ppat->max_entries) {
- if (!entry)
- return ERR_PTR(-ENOSPC);
-
- kref_get(&entry->ref);
- return entry;
- }
-
- i = find_first_zero_bit(ppat->used, ppat->max_entries);
- entry = __alloc_ppat_entry(ppat, i, value);
- ppat->update_hw(i915);
- return entry;
+ /* TGL doesn't support LLC or AGE settings */
+ I915_WRITE(GEN12_PAT_INDEX(0), GEN8_PPAT_WB);
+ I915_WRITE(GEN12_PAT_INDEX(1), GEN8_PPAT_WC);
+ I915_WRITE(GEN12_PAT_INDEX(2), GEN8_PPAT_WT);
+ I915_WRITE(GEN12_PAT_INDEX(3), GEN8_PPAT_UC);
+ I915_WRITE(GEN12_PAT_INDEX(4), GEN8_PPAT_WB);
+ I915_WRITE(GEN12_PAT_INDEX(5), GEN8_PPAT_WB);
+ I915_WRITE(GEN12_PAT_INDEX(6), GEN8_PPAT_WB);
+ I915_WRITE(GEN12_PAT_INDEX(7), GEN8_PPAT_WB);
}
-static void release_ppat(struct kref *kref)
+static void cnl_setup_private_ppat(struct drm_i915_private *dev_priv)
{
- struct intel_ppat_entry *entry =
- container_of(kref, struct intel_ppat_entry, ref);
- struct drm_i915_private *i915 = entry->ppat->i915;
-
- __free_ppat_entry(entry);
- entry->ppat->update_hw(i915);
+ I915_WRITE(GEN10_PAT_INDEX(0), GEN8_PPAT_WB | GEN8_PPAT_LLC);
+ I915_WRITE(GEN10_PAT_INDEX(1), GEN8_PPAT_WC | GEN8_PPAT_LLCELLC);
+ I915_WRITE(GEN10_PAT_INDEX(2), GEN8_PPAT_WT | GEN8_PPAT_LLCELLC);
+ I915_WRITE(GEN10_PAT_INDEX(3), GEN8_PPAT_UC);
+ I915_WRITE(GEN10_PAT_INDEX(4), GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0));
+ I915_WRITE(GEN10_PAT_INDEX(5), GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1));
+ I915_WRITE(GEN10_PAT_INDEX(6), GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2));
+ I915_WRITE(GEN10_PAT_INDEX(7), GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));
}
-/**
- * intel_ppat_put - put back the PPAT entry got from intel_ppat_get()
- * @entry: an intel PPAT entry
- *
- * Put back the PPAT entry got from intel_ppat_get(). If the PPAT index of the
- * entry is dynamically allocated, its reference count will be decreased. Once
- * the reference count becomes into zero, the PPAT index becomes free again.
- */
-void intel_ppat_put(const struct intel_ppat_entry *entry)
-{
- struct intel_ppat *ppat = entry->ppat;
- unsigned int index = entry - ppat->entries;
-
- GEM_BUG_ON(!ppat->max_entries);
-
- kref_put(&ppat->entries[index].ref, release_ppat);
-}
-
-static void cnl_private_pat_update_hw(struct drm_i915_private *dev_priv)
-{
- struct intel_ppat *ppat = &dev_priv->ppat;
- int i;
-
- for_each_set_bit(i, ppat->dirty, ppat->max_entries) {
- I915_WRITE(GEN10_PAT_INDEX(i), ppat->entries[i].value);
- clear_bit(i, ppat->dirty);
- }
-}
-
-static void bdw_private_pat_update_hw(struct drm_i915_private *dev_priv)
+/*
+ * The GGTT and PPGTT need a private PPAT setup in order to handle cacheability
+ * bits. When using advanced contexts, each context stores its own PAT, but
+ * writing this data shouldn't be harmful even in those cases.
+ */
+static void bdw_setup_private_ppat(struct drm_i915_private *dev_priv)
{
- struct intel_ppat *ppat = &dev_priv->ppat;
- u64 pat = 0;
- int i;
-
- for (i = 0; i < ppat->max_entries; i++)
- pat |= GEN8_PPAT(i, ppat->entries[i].value);
+ u64 pat;
- bitmap_clear(ppat->dirty, 0, ppat->max_entries);
+ pat = GEN8_PPAT(0, GEN8_PPAT_WB | GEN8_PPAT_LLC) | /* for normal objects, no eLLC */
+ GEN8_PPAT(1, GEN8_PPAT_WC | GEN8_PPAT_LLCELLC) | /* for something pointing to ptes? */
+ GEN8_PPAT(2, GEN8_PPAT_WT | GEN8_PPAT_LLCELLC) | /* for scanout with eLLC */
+ GEN8_PPAT(3, GEN8_PPAT_UC) | /* Uncached objects, mostly for scanout */
+ GEN8_PPAT(4, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0)) |
+ GEN8_PPAT(5, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1)) |
+ GEN8_PPAT(6, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2)) |
+ GEN8_PPAT(7, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));
I915_WRITE(GEN8_PRIVATE_PAT_LO, lower_32_bits(pat));
I915_WRITE(GEN8_PRIVATE_PAT_HI, upper_32_bits(pat));
}
-static unsigned int bdw_private_pat_match(u8 src, u8 dst)
-{
- unsigned int score = 0;
- enum {
- AGE_MATCH = BIT(0),
- TC_MATCH = BIT(1),
- CA_MATCH = BIT(2),
- };
-
- /* Cache attribute has to be matched. */
- if (GEN8_PPAT_GET_CA(src) != GEN8_PPAT_GET_CA(dst))
- return 0;
-
- score |= CA_MATCH;
-
- if (GEN8_PPAT_GET_TC(src) == GEN8_PPAT_GET_TC(dst))
- score |= TC_MATCH;
-
- if (GEN8_PPAT_GET_AGE(src) == GEN8_PPAT_GET_AGE(dst))
- score |= AGE_MATCH;
-
- if (score == (AGE_MATCH | TC_MATCH | CA_MATCH))
- return INTEL_PPAT_PERFECT_MATCH;
-
- return score;
-}
-
-static unsigned int chv_private_pat_match(u8 src, u8 dst)
-{
- return (CHV_PPAT_GET_SNOOP(src) == CHV_PPAT_GET_SNOOP(dst)) ?
- INTEL_PPAT_PERFECT_MATCH : 0;
-}
-
-static void cnl_setup_private_ppat(struct intel_ppat *ppat)
-{
- ppat->max_entries = 8;
- ppat->update_hw = cnl_private_pat_update_hw;
- ppat->match = bdw_private_pat_match;
- ppat->clear_value = GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3);
-
- __alloc_ppat_entry(ppat, 0, GEN8_PPAT_WB | GEN8_PPAT_LLC);
- __alloc_ppat_entry(ppat, 1, GEN8_PPAT_WC | GEN8_PPAT_LLCELLC);
- __alloc_ppat_entry(ppat, 2, GEN8_PPAT_WT | GEN8_PPAT_LLCELLC);
- __alloc_ppat_entry(ppat, 3, GEN8_PPAT_UC);
- __alloc_ppat_entry(ppat, 4, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0));
- __alloc_ppat_entry(ppat, 5, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1));
- __alloc_ppat_entry(ppat, 6, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2));
- __alloc_ppat_entry(ppat, 7, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));
-}
-
-/* The GGTT and PPGTT need a private PPAT setup in order to handle cacheability
- * bits. When using advanced contexts each context stores its own PAT, but
- * writing this data shouldn't be harmful even in those cases. */
-static void bdw_setup_private_ppat(struct intel_ppat *ppat)
-{
- ppat->max_entries = 8;
- ppat->update_hw = bdw_private_pat_update_hw;
- ppat->match = bdw_private_pat_match;
- ppat->clear_value = GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3);
-
- if (!HAS_PPGTT(ppat->i915)) {
- /* Spec: "For GGTT, there is NO pat_sel[2:0] from the entry,
- * so RTL will always use the value corresponding to
- * pat_sel = 000".
- * So let's disable cache for GGTT to avoid screen corruptions.
- * MOCS still can be used though.
- * - System agent ggtt writes (i.e. cpu gtt mmaps) already work
- * before this patch, i.e. the same uncached + snooping access
- * like on gen6/7 seems to be in effect.
- * - So this just fixes blitter/render access. Again it looks
- * like it's not just uncached access, but uncached + snooping.
- * So we can still hold onto all our assumptions wrt cpu
- * clflushing on LLC machines.
- */
- __alloc_ppat_entry(ppat, 0, GEN8_PPAT_UC);
- return;
- }
-
- __alloc_ppat_entry(ppat, 0, GEN8_PPAT_WB | GEN8_PPAT_LLC); /* for normal objects, no eLLC */
- __alloc_ppat_entry(ppat, 1, GEN8_PPAT_WC | GEN8_PPAT_LLCELLC); /* for something pointing to ptes? */
- __alloc_ppat_entry(ppat, 2, GEN8_PPAT_WT | GEN8_PPAT_LLCELLC); /* for scanout with eLLC */
- __alloc_ppat_entry(ppat, 3, GEN8_PPAT_UC); /* Uncached objects, mostly for scanout */
- __alloc_ppat_entry(ppat, 4, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0));
- __alloc_ppat_entry(ppat, 5, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1));
- __alloc_ppat_entry(ppat, 6, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2));
- __alloc_ppat_entry(ppat, 7, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));
-}
-
-static void chv_setup_private_ppat(struct intel_ppat *ppat)
+static void chv_setup_private_ppat(struct drm_i915_private *dev_priv)
{
- ppat->max_entries = 8;
- ppat->update_hw = bdw_private_pat_update_hw;
- ppat->match = chv_private_pat_match;
- ppat->clear_value = CHV_PPAT_SNOOP;
+ u64 pat;
/*
* Map WB on BDW to snooped on CHV.
@@ -3275,14 +2941,17 @@ static void chv_setup_private_ppat(struct intel_ppat *ppat)
* in order to keep the global status page working.
*/
- __alloc_ppat_entry(ppat, 0, CHV_PPAT_SNOOP);
- __alloc_ppat_entry(ppat, 1, 0);
- __alloc_ppat_entry(ppat, 2, 0);
- __alloc_ppat_entry(ppat, 3, 0);
- __alloc_ppat_entry(ppat, 4, CHV_PPAT_SNOOP);
- __alloc_ppat_entry(ppat, 5, CHV_PPAT_SNOOP);
- __alloc_ppat_entry(ppat, 6, CHV_PPAT_SNOOP);
- __alloc_ppat_entry(ppat, 7, CHV_PPAT_SNOOP);
+ pat = GEN8_PPAT(0, CHV_PPAT_SNOOP) |
+ GEN8_PPAT(1, 0) |
+ GEN8_PPAT(2, 0) |
+ GEN8_PPAT(3, 0) |
+ GEN8_PPAT(4, CHV_PPAT_SNOOP) |
+ GEN8_PPAT(5, CHV_PPAT_SNOOP) |
+ GEN8_PPAT(6, CHV_PPAT_SNOOP) |
+ GEN8_PPAT(7, CHV_PPAT_SNOOP);
+
+ I915_WRITE(GEN8_PRIVATE_PAT_LO, lower_32_bits(pat));
+ I915_WRITE(GEN8_PRIVATE_PAT_HI, upper_32_bits(pat));
}
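GEN8_PPAT(i, x), defined later in this patch's header hunk, packs eight 8-bit PAT entries into one 64-bit value, entry i occupying bits 8*i..8*i+7; the low and high halves are then written to the two PAT registers. A minimal, standalone sketch of the packing arithmetic (the entry values here are illustrative, not real PAT encodings):

	#define GEN8_PPAT(i, x) ((u64)(x) << ((i) * 8))

	/* Pack illustrative entries 0 and 7 into one 64-bit PAT word. */
	u64 pat = GEN8_PPAT(0, 0x03) | GEN8_PPAT(7, 0x1b);
	u32 pat_lo = lower_32_bits(pat);	/* -> GEN8_PRIVATE_PAT_LO */
	u32 pat_hi = upper_32_bits(pat);	/* -> GEN8_PRIVATE_PAT_HI */
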
static void gen6_gmch_remove(struct i915_address_space *vm)
@@ -3295,27 +2964,16 @@ static void gen6_gmch_remove(struct i915_address_space *vm)
static void setup_private_pat(struct drm_i915_private *dev_priv)
{
- struct intel_ppat *ppat = &dev_priv->ppat;
- int i;
-
- ppat->i915 = dev_priv;
+ GEM_BUG_ON(INTEL_GEN(dev_priv) < 8);
- if (INTEL_GEN(dev_priv) >= 10)
- cnl_setup_private_ppat(ppat);
+ if (INTEL_GEN(dev_priv) >= 12)
+ tgl_setup_private_ppat(dev_priv);
+ else if (INTEL_GEN(dev_priv) >= 10)
+ cnl_setup_private_ppat(dev_priv);
else if (IS_CHERRYVIEW(dev_priv) || IS_GEN9_LP(dev_priv))
- chv_setup_private_ppat(ppat);
+ chv_setup_private_ppat(dev_priv);
else
- bdw_setup_private_ppat(ppat);
-
- GEM_BUG_ON(ppat->max_entries > INTEL_MAX_PPAT_ENTRIES);
-
- for_each_clear_bit(i, ppat->used, ppat->max_entries) {
- ppat->entries[i].value = ppat->clear_value;
- ppat->entries[i].ppat = ppat;
- set_bit(i, ppat->dirty);
- }
-
- ppat->update_hw(dev_priv);
+ bdw_setup_private_ppat(dev_priv);
}
static int gen8_gmch_probe(struct i915_ggtt *ggtt)
@@ -3360,11 +3018,6 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
ggtt->vm.insert_page = bxt_vtd_ggtt_insert_page__BKL;
if (ggtt->vm.clear_range != nop_clear_range)
ggtt->vm.clear_range = bxt_vtd_ggtt_clear_range__BKL;
-
- /* Prevent recursively calling stop_machine() and deadlocks. */
- dev_info(dev_priv->drm.dev,
- "Disabling error capture for VT-d workaround\n");
- i915_disable_error_state(dev_priv, -ENODEV);
}
ggtt->invalidate = gen6_ggtt_invalidate;
@@ -3477,26 +3130,24 @@ static int i915_gmch_probe(struct i915_ggtt *ggtt)
ggtt->vm.vma_ops.clear_pages = clear_pages;
if (unlikely(ggtt->do_idle_maps))
- DRM_INFO("applying Ironlake quirks for intel_iommu\n");
+ dev_notice(dev_priv->drm.dev,
+ "Applying Ironlake quirks for intel_iommu\n");
return 0;
}
-/**
- * i915_ggtt_probe_hw - Probe GGTT hardware location
- * @dev_priv: i915 device
- */
-int i915_ggtt_probe_hw(struct drm_i915_private *dev_priv)
+static int ggtt_probe_hw(struct i915_ggtt *ggtt, struct intel_gt *gt)
{
- struct i915_ggtt *ggtt = &dev_priv->ggtt;
+ struct drm_i915_private *i915 = gt->i915;
int ret;
- ggtt->vm.i915 = dev_priv;
- ggtt->vm.dma = &dev_priv->drm.pdev->dev;
+ ggtt->vm.gt = gt;
+ ggtt->vm.i915 = i915;
+ ggtt->vm.dma = &i915->drm.pdev->dev;
- if (INTEL_GEN(dev_priv) <= 5)
+ if (INTEL_GEN(i915) <= 5)
ret = i915_gmch_probe(ggtt);
- else if (INTEL_GEN(dev_priv) < 8)
+ else if (INTEL_GEN(i915) < 8)
ret = gen6_gmch_probe(ggtt);
else
ret = gen8_gmch_probe(ggtt);
@@ -3524,51 +3175,82 @@ int i915_ggtt_probe_hw(struct drm_i915_private *dev_priv)
DRM_DEBUG_DRIVER("GMADR size = %lluM\n", (u64)ggtt->mappable_end >> 20);
DRM_DEBUG_DRIVER("DSM size = %lluM\n",
(u64)resource_size(&intel_graphics_stolen_res) >> 20);
- if (intel_vtd_active())
- DRM_INFO("VT-d active for gfx access\n");
return 0;
}
/**
- * i915_ggtt_init_hw - Initialize GGTT hardware
- * @dev_priv: i915 device
+ * i915_ggtt_probe_hw - Probe GGTT hardware location
+ * @i915: i915 device
*/
-int i915_ggtt_init_hw(struct drm_i915_private *dev_priv)
+int i915_ggtt_probe_hw(struct drm_i915_private *i915)
{
- struct i915_ggtt *ggtt = &dev_priv->ggtt;
int ret;
- stash_init(&dev_priv->mm.wc_stash);
+ ret = ggtt_probe_hw(&i915->ggtt, &i915->gt);
+ if (ret)
+ return ret;
+
+ if (intel_vtd_active())
+ dev_info(i915->drm.dev, "VT-d active for gfx access\n");
+
+ return 0;
+}
+
+static int ggtt_init_hw(struct i915_ggtt *ggtt)
+{
+ struct drm_i915_private *i915 = ggtt->vm.i915;
+ int ret = 0;
+
+ mutex_lock(&i915->drm.struct_mutex);
- /* Note that we use page colouring to enforce a guard page at the
- * end of the address space. This is required as the CS may prefetch
- * beyond the end of the batch buffer, across the page boundary,
- * and beyond the end of the GTT if we do not provide a guard.
- */
- mutex_lock(&dev_priv->drm.struct_mutex);
i915_address_space_init(&ggtt->vm, VM_CLASS_GGTT);
ggtt->vm.is_ggtt = true;
/* Only VLV supports read-only GGTT mappings */
- ggtt->vm.has_read_only = IS_VALLEYVIEW(dev_priv);
+ ggtt->vm.has_read_only = IS_VALLEYVIEW(i915);
- if (!HAS_LLC(dev_priv) && !HAS_PPGTT(dev_priv))
+ if (!HAS_LLC(i915) && !HAS_PPGTT(i915))
ggtt->vm.mm.color_adjust = i915_gtt_color_adjust;
- mutex_unlock(&dev_priv->drm.struct_mutex);
- if (!io_mapping_init_wc(&dev_priv->ggtt.iomap,
- dev_priv->ggtt.gmadr.start,
- dev_priv->ggtt.mappable_end)) {
+ if (!io_mapping_init_wc(&ggtt->iomap,
+ ggtt->gmadr.start,
+ ggtt->mappable_end)) {
+ ggtt->vm.cleanup(&ggtt->vm);
ret = -EIO;
- goto out_gtt_cleanup;
+ goto out;
}
ggtt->mtrr = arch_phys_wc_add(ggtt->gmadr.start, ggtt->mappable_end);
i915_ggtt_init_fences(ggtt);
+out:
+ mutex_unlock(&i915->drm.struct_mutex);
+
+ return ret;
+}
+
+/**
+ * i915_ggtt_init_hw - Initialize GGTT hardware
+ * @dev_priv: i915 device
+ */
+int i915_ggtt_init_hw(struct drm_i915_private *dev_priv)
+{
+ int ret;
+
+ stash_init(&dev_priv->mm.wc_stash);
+
+ /* Note that we use page colouring to enforce a guard page at the
+ * end of the address space. This is required as the CS may prefetch
+ * beyond the end of the batch buffer, across the page boundary,
+ * and beyond the end of the GTT if we do not provide a guard.
+ */
+ ret = ggtt_init_hw(&dev_priv->ggtt);
+ if (ret)
+ return ret;
+
/*
* Initialise stolen early so that we may reserve preallocated
* objects for the BIOS to KMS transition.
@@ -3580,7 +3262,7 @@ int i915_ggtt_init_hw(struct drm_i915_private *dev_priv)
return 0;
out_gtt_cleanup:
- ggtt->vm.cleanup(&ggtt->vm);
+ dev_priv->ggtt.vm.cleanup(&dev_priv->ggtt.vm);
return ret;
}
@@ -3592,35 +3274,35 @@ int i915_ggtt_enable_hw(struct drm_i915_private *dev_priv)
return 0;
}
-void i915_ggtt_enable_guc(struct drm_i915_private *i915)
+void i915_ggtt_enable_guc(struct i915_ggtt *ggtt)
{
- GEM_BUG_ON(i915->ggtt.invalidate != gen6_ggtt_invalidate);
+ GEM_BUG_ON(ggtt->invalidate != gen6_ggtt_invalidate);
- i915->ggtt.invalidate = guc_ggtt_invalidate;
+ ggtt->invalidate = guc_ggtt_invalidate;
- i915_ggtt_invalidate(i915);
+ ggtt->invalidate(ggtt);
}
-void i915_ggtt_disable_guc(struct drm_i915_private *i915)
+void i915_ggtt_disable_guc(struct i915_ggtt *ggtt)
{
/* XXX Temporary pardon for error unload */
- if (i915->ggtt.invalidate == gen6_ggtt_invalidate)
+ if (ggtt->invalidate == gen6_ggtt_invalidate)
return;
/* We should only be called after i915_ggtt_enable_guc() */
- GEM_BUG_ON(i915->ggtt.invalidate != guc_ggtt_invalidate);
+ GEM_BUG_ON(ggtt->invalidate != guc_ggtt_invalidate);
- i915->ggtt.invalidate = gen6_ggtt_invalidate;
+ ggtt->invalidate = gen6_ggtt_invalidate;
- i915_ggtt_invalidate(i915);
+ ggtt->invalidate(ggtt);
}
-void i915_gem_restore_gtt_mappings(struct drm_i915_private *dev_priv)
+static void ggtt_restore_mappings(struct i915_ggtt *ggtt)
{
- struct i915_ggtt *ggtt = &dev_priv->ggtt;
struct i915_vma *vma, *vn;
+ bool flush = false;
- i915_check_and_clear_faults(dev_priv);
+ intel_gt_check_and_clear_faults(ggtt->vm.gt);
mutex_lock(&ggtt->vm.mutex);
@@ -3643,10 +3325,9 @@ void i915_gem_restore_gtt_mappings(struct drm_i915_private *dev_priv)
WARN_ON(i915_vma_bind(vma,
obj ? obj->cache_level : 0,
PIN_UPDATE));
- if (obj) {
- i915_gem_object_lock(obj);
- WARN_ON(i915_gem_object_set_to_gtt_domain(obj, false));
- i915_gem_object_unlock(obj);
+ if (obj) { /* only used during resume => exclusive access */
+ flush |= fetch_and_zero(&obj->write_domain);
+ obj->read_domains |= I915_GEM_DOMAIN_GTT;
}
lock:
@@ -3654,17 +3335,20 @@ lock:
}
ggtt->vm.closed = false;
- i915_ggtt_invalidate(dev_priv);
+ ggtt->invalidate(ggtt);
mutex_unlock(&ggtt->vm.mutex);
- if (INTEL_GEN(dev_priv) >= 8) {
- struct intel_ppat *ppat = &dev_priv->ppat;
+ if (flush)
+ wbinvd_on_all_cpus();
+}
- bitmap_set(ppat->dirty, 0, ppat->max_entries);
- dev_priv->ppat.update_hw(dev_priv);
- return;
- }
+void i915_gem_restore_gtt_mappings(struct drm_i915_private *i915)
+{
+ ggtt_restore_mappings(&i915->ggtt);
+
+ if (INTEL_GEN(i915) >= 8)
+ setup_private_pat(i915);
}
static struct scatterlist *
@@ -3953,7 +3637,7 @@ int i915_gem_gtt_reserve(struct i915_address_space *vm,
GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
GEM_BUG_ON(!IS_ALIGNED(offset, I915_GTT_MIN_ALIGNMENT));
GEM_BUG_ON(range_overflows(offset, size, vm->total));
- GEM_BUG_ON(vm == &vm->i915->mm.aliasing_ppgtt->vm);
+ GEM_BUG_ON(vm == &vm->i915->ggtt.alias->vm);
GEM_BUG_ON(drm_mm_node_allocated(node));
node->size = size;
@@ -4050,7 +3734,7 @@ int i915_gem_gtt_insert(struct i915_address_space *vm,
GEM_BUG_ON(start >= end);
GEM_BUG_ON(start > 0 && !IS_ALIGNED(start, I915_GTT_PAGE_SIZE));
GEM_BUG_ON(end < U64_MAX && !IS_ALIGNED(end, I915_GTT_PAGE_SIZE));
- GEM_BUG_ON(vm == &vm->i915->mm.aliasing_ppgtt->vm);
+ GEM_BUG_ON(vm == &vm->i915->ggtt.alias->vm);
GEM_BUG_ON(drm_mm_node_allocated(node));
if (unlikely(range_overflows(start, size, end)))
@@ -4093,7 +3777,8 @@ int i915_gem_gtt_insert(struct i915_address_space *vm,
if (flags & PIN_NOEVICT)
return -ENOSPC;
- /* No free space, pick a slot at random.
+ /*
+ * No free space, pick a slot at random.
*
* There is a pathological case here using a GTT shared between
* mmap and GPU (i.e. ggtt/aliasing_ppgtt but not full-ppgtt):
@@ -4121,6 +3806,9 @@ int i915_gem_gtt_insert(struct i915_address_space *vm,
if (err != -ENOSPC)
return err;
+ if (flags & PIN_NOSEARCH)
+ return -ENOSPC;
+
/* Randomly selected placement is pinned, do a search */
err = i915_gem_evict_something(vm, size, alignment, color,
start, end, flags);
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index 812717ccc69b..b97a47fc7a68 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -35,15 +35,19 @@
#define __I915_GEM_GTT_H__
#include <linux/io-mapping.h>
+#include <linux/kref.h>
#include <linux/mm.h>
#include <linux/pagevec.h>
+#include <linux/workqueue.h>
+
+#include <drm/drm_mm.h>
#include "gt/intel_reset.h"
#include "i915_gem_fence_reg.h"
#include "i915_request.h"
#include "i915_scatterlist.h"
#include "i915_selftest.h"
-#include "i915_timeline.h"
+#include "gt/intel_timeline.h"
#define I915_GTT_PAGE_SIZE_4K BIT_ULL(12)
#define I915_GTT_PAGE_SIZE_64K BIT_ULL(16)
@@ -64,12 +68,10 @@
struct drm_i915_file_private;
struct drm_i915_gem_object;
struct i915_vma;
+struct intel_gt;
typedef u32 gen6_pte_t;
typedef u64 gen8_pte_t;
-typedef u64 gen8_pde_t;
-typedef u64 gen8_ppgtt_pdpe_t;
-typedef u64 gen8_ppgtt_pml4e_t;
#define ggtt_total_entries(ggtt) ((ggtt)->vm.total >> PAGE_SHIFT)
@@ -113,30 +115,18 @@ typedef u64 gen8_ppgtt_pml4e_t;
#define HSW_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0x7f0))
#define HSW_PTE_ADDR_ENCODE(addr) HSW_GTT_ADDR_ENCODE(addr)
-/* GEN8 32b style address is defined as a 3 level page table:
+/*
+ * GEN8 32b style address is defined as a 3 level page table:
* 31:30 | 29:21 | 20:12 | 11:0
* PDPE | PDE | PTE | offset
* The difference as compared to normal x86 3 level page table is the PDPEs are
* programmed via register.
- */
-#define GEN8_3LVL_PDPES 4
-#define GEN8_PDE_SHIFT 21
-#define GEN8_PDE_MASK 0x1ff
-#define GEN8_PTE_SHIFT 12
-#define GEN8_PTE_MASK 0x1ff
-#define GEN8_PTES I915_PTES(sizeof(gen8_pte_t))
-
-/* GEN8 48b style address is defined as a 4 level page table:
+ *
+ * GEN8 48b style address is defined as a 4 level page table:
* 47:39 | 38:30 | 29:21 | 20:12 | 11:0
* PML4E | PDPE | PDE | PTE | offset
*/
-#define GEN8_PML4ES_PER_PML4 512
-#define GEN8_PML4E_SHIFT 39
-#define GEN8_PML4E_MASK (GEN8_PML4ES_PER_PML4 - 1)
-#define GEN8_PDPE_SHIFT 30
-/* NB: GEN8_PDPE_MASK is untrue for 32b platforms, but it has no impact on 32b page
- * tables */
-#define GEN8_PDPE_MASK 0x1ff
+#define GEN8_3LVL_PDPES 4
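As a worked example of the address layout described above: each level indexes with a 9-bit field, so the four indices of a 48b address fall at shifts 39/30/21/12. A standalone sketch (these helper names are illustrative; the driver's own gen8_*_index() macros are removed by this patch in favour of generic ones):

	static inline u32 pml4e_index(u64 addr) { return (addr >> 39) & 0x1ff; }
	static inline u32 pdpe_index(u64 addr)  { return (addr >> 30) & 0x1ff; }
	static inline u32 pde_index(u64 addr)   { return (addr >> 21) & 0x1ff; }
	static inline u32 pte_index(u64 addr)   { return (addr >> 12) & 0x1ff; }
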
#define PPAT_UNCACHED (_PAGE_PWT | _PAGE_PCD)
#define PPAT_CACHED_PDE 0 /* WB LLC */
@@ -155,11 +145,6 @@ typedef u64 gen8_ppgtt_pml4e_t;
#define GEN8_PPAT_ELLC_OVERRIDE (0<<2)
#define GEN8_PPAT(i, x) ((u64)(x) << ((i) * 8))
-#define GEN8_PPAT_GET_CA(x) ((x) & 3)
-#define GEN8_PPAT_GET_TC(x) ((x) & (3 << 2))
-#define GEN8_PPAT_GET_AGE(x) ((x) & (3 << 4))
-#define CHV_PPAT_GET_SNOOP(x) ((x) & (1 << 6))
-
#define GEN8_PDE_IPS_64K BIT(11)
#define GEN8_PDE_PS_2M BIT(7)
@@ -243,8 +228,10 @@ struct i915_page_dma {
};
};
-#define px_base(px) (&(px)->base)
-#define px_dma(px) (px_base(px)->daddr)
+struct i915_page_scratch {
+ struct i915_page_dma base;
+ u64 encode;
+};
struct i915_page_table {
struct i915_page_dma base;
@@ -252,12 +239,32 @@ struct i915_page_table {
};
struct i915_page_directory {
- struct i915_page_dma base;
- atomic_t used;
+ struct i915_page_table pt;
spinlock_t lock;
void *entry[512];
};
+#define __px_choose_expr(x, type, expr, other) \
+ __builtin_choose_expr( \
+ __builtin_types_compatible_p(typeof(x), type) || \
+ __builtin_types_compatible_p(typeof(x), const type), \
+ ({ type __x = (type)(x); expr; }), \
+ other)
+
+#define px_base(px) \
+ __px_choose_expr(px, struct i915_page_dma *, __x, \
+ __px_choose_expr(px, struct i915_page_scratch *, &__x->base, \
+ __px_choose_expr(px, struct i915_page_table *, &__x->base, \
+ __px_choose_expr(px, struct i915_page_directory *, &__x->pt.base, \
+ (void)0))))
+#define px_dma(px) (px_base(px)->daddr)
+
+#define px_pt(px) \
+ __px_choose_expr(px, struct i915_page_table *, __x, \
+ __px_choose_expr(px, struct i915_page_directory *, &__x->pt, \
+ (void)0))
+#define px_used(px) (&px_pt(px)->used)
+
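px_base() above is a compile-time type switch: __builtin_types_compatible_p() picks, via __builtin_choose_expr(), the branch whose type matches the argument, so px_dma()/px_used() accept any of the page-table structs without casts. A reduced sketch of the pattern with two types (illustrative, GNU C; the real macro ends the chain with (void)0 so an unknown type fails to compile):

	struct base { u64 daddr; };
	struct table { struct base base; };

	#define to_base(px) \
		__builtin_choose_expr( \
			__builtin_types_compatible_p(typeof(px), struct base *), \
			({ struct base *__x = (struct base *)(px); __x; }), \
			({ struct table *__x = (struct table *)(px); &__x->base; }))

	/* Works on either pointer type: struct table t; to_base(&t)->daddr = 0; */
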
struct i915_vma_ops {
/* Map an object into an address space with the given cache flags. */
int (*bind_vma)(struct i915_vma *vma,
@@ -280,8 +287,10 @@ struct pagestash {
struct i915_address_space {
struct kref ref;
+ struct rcu_work rcu;
struct drm_mm mm;
+ struct intel_gt *gt;
struct drm_i915_private *i915;
struct device *dma;
/* Every address space belongs to a struct file - except for the global
@@ -302,12 +311,9 @@ struct i915_address_space {
#define VM_CLASS_GGTT 0
#define VM_CLASS_PPGTT 1
- u64 scratch_pte;
- int scratch_order;
- struct i915_page_dma scratch_page;
- struct i915_page_table *scratch_pt;
- struct i915_page_directory *scratch_pd;
- struct i915_page_directory *scratch_pdp; /* GEN8+ & 48b PPGTT */
+ struct i915_page_scratch scratch[4];
+ unsigned int scratch_order;
+ unsigned int top;
/**
* List of vma currently bound.
@@ -386,7 +392,10 @@ struct i915_ggtt {
/** "Graphics Stolen Memory" holds the global PTEs */
void __iomem *gsm;
- void (*invalidate)(struct drm_i915_private *dev_priv);
+ void (*invalidate)(struct i915_ggtt *ggtt);
+
+ /** PPGTT used for aliasing the PPGTT with the GTT */
+ struct i915_ppgtt *alias;
bool do_idle_maps;
@@ -425,8 +434,6 @@ struct gen6_ppgtt {
unsigned int pin_count;
bool scan_for_unused_pt;
-
- struct gen6_ppgtt_cleanup_work *work;
};
#define __to_gen6_ppgtt(base) container_of(base, struct gen6_ppgtt, base)
@@ -506,15 +513,6 @@ static inline u32 gen6_pde_index(u32 addr)
return i915_pde_index(addr, GEN6_PDE_SHIFT);
}
-static inline unsigned int
-i915_pdpes_per_pdp(const struct i915_address_space *vm)
-{
- if (i915_vm_is_4lvl(vm))
- return GEN8_PML4ES_PER_PML4;
-
- return GEN8_3LVL_PDPES;
-}
-
static inline struct i915_page_table *
i915_pt_entry(const struct i915_page_directory * const pd,
const unsigned short n)
@@ -529,73 +527,12 @@ i915_pd_entry(const struct i915_page_directory * const pdp,
return pdp->entry[n];
}
-static inline struct i915_page_directory *
-i915_pdp_entry(const struct i915_page_directory * const pml4,
- const unsigned short n)
-{
- return pml4->entry[n];
-}
-
-/* Equivalent to the gen6 version, For each pde iterates over every pde
- * between from start until start + length. On gen8+ it simply iterates
- * over every page directory entry in a page directory.
- */
-#define gen8_for_each_pde(pt, pd, start, length, iter) \
- for (iter = gen8_pde_index(start); \
- length > 0 && iter < I915_PDES && \
- (pt = i915_pt_entry(pd, iter), true); \
- ({ u64 temp = ALIGN(start+1, 1 << GEN8_PDE_SHIFT); \
- temp = min(temp - start, length); \
- start += temp, length -= temp; }), ++iter)
-
-#define gen8_for_each_pdpe(pd, pdp, start, length, iter) \
- for (iter = gen8_pdpe_index(start); \
- length > 0 && iter < i915_pdpes_per_pdp(vm) && \
- (pd = i915_pd_entry(pdp, iter), true); \
- ({ u64 temp = ALIGN(start+1, 1 << GEN8_PDPE_SHIFT); \
- temp = min(temp - start, length); \
- start += temp, length -= temp; }), ++iter)
-
-#define gen8_for_each_pml4e(pdp, pml4, start, length, iter) \
- for (iter = gen8_pml4e_index(start); \
- length > 0 && iter < GEN8_PML4ES_PER_PML4 && \
- (pdp = i915_pdp_entry(pml4, iter), true); \
- ({ u64 temp = ALIGN(start+1, 1ULL << GEN8_PML4E_SHIFT); \
- temp = min(temp - start, length); \
- start += temp, length -= temp; }), ++iter)
-
-static inline u32 gen8_pte_index(u64 address)
-{
- return i915_pte_index(address, GEN8_PDE_SHIFT);
-}
-
-static inline u32 gen8_pde_index(u64 address)
-{
- return i915_pde_index(address, GEN8_PDE_SHIFT);
-}
-
-static inline u32 gen8_pdpe_index(u64 address)
-{
- return (address >> GEN8_PDPE_SHIFT) & GEN8_PDPE_MASK;
-}
-
-static inline u32 gen8_pml4e_index(u64 address)
-{
- return (address >> GEN8_PML4E_SHIFT) & GEN8_PML4E_MASK;
-}
-
-static inline u64 gen8_pte_count(u64 address, u64 length)
-{
- return i915_pte_count(address, length, GEN8_PDE_SHIFT);
-}
-
static inline dma_addr_t
i915_page_dir_dma_addr(const struct i915_ppgtt *ppgtt, const unsigned int n)
{
- struct i915_page_directory *pd;
+ struct i915_page_dma *pt = ppgtt->pd->entry[n];
- pd = i915_pdp_entry(ppgtt->pd, n);
- return px_dma(pd);
+ return px_dma(pt ?: px_base(&ppgtt->vm.scratch[ppgtt->vm.top]));
}
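The `?:` above is GCC's two-operand conditional: `a ?: b` evaluates a once and yields it if non-zero, else b. Expanded, the lookup with its scratch fallback reads roughly:

	struct i915_page_dma *pt = ppgtt->pd->entry[n];

	if (!pt)
		pt = px_base(&ppgtt->vm.scratch[ppgtt->vm.top]);

	return pt->daddr;
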
static inline struct i915_ggtt *
@@ -614,46 +551,15 @@ i915_vm_to_ppgtt(struct i915_address_space *vm)
return container_of(vm, struct i915_ppgtt, vm);
}
-#define INTEL_MAX_PPAT_ENTRIES 8
-#define INTEL_PPAT_PERFECT_MATCH (~0U)
-
-struct intel_ppat;
-
-struct intel_ppat_entry {
- struct intel_ppat *ppat;
- struct kref ref;
- u8 value;
-};
-
-struct intel_ppat {
- struct intel_ppat_entry entries[INTEL_MAX_PPAT_ENTRIES];
- DECLARE_BITMAP(used, INTEL_MAX_PPAT_ENTRIES);
- DECLARE_BITMAP(dirty, INTEL_MAX_PPAT_ENTRIES);
- unsigned int max_entries;
- u8 clear_value;
- /*
- * Return a score to show how two PPAT values match,
- * a INTEL_PPAT_PERFECT_MATCH indicates a perfect match
- */
- unsigned int (*match)(u8 src, u8 dst);
- void (*update_hw)(struct drm_i915_private *i915);
-
- struct drm_i915_private *i915;
-};
-
-const struct intel_ppat_entry *
-intel_ppat_get(struct drm_i915_private *i915, u8 value);
-void intel_ppat_put(const struct intel_ppat_entry *entry);
-
int i915_ggtt_probe_hw(struct drm_i915_private *dev_priv);
int i915_ggtt_init_hw(struct drm_i915_private *dev_priv);
int i915_ggtt_enable_hw(struct drm_i915_private *dev_priv);
-void i915_ggtt_enable_guc(struct drm_i915_private *i915);
-void i915_ggtt_disable_guc(struct drm_i915_private *i915);
-int i915_gem_init_ggtt(struct drm_i915_private *dev_priv);
-void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv);
+void i915_ggtt_enable_guc(struct i915_ggtt *ggtt);
+void i915_ggtt_disable_guc(struct i915_ggtt *ggtt);
+int i915_init_ggtt(struct drm_i915_private *dev_priv);
+void i915_ggtt_driver_release(struct drm_i915_private *dev_priv);
-int i915_ppgtt_init_hw(struct drm_i915_private *dev_priv);
+int i915_ppgtt_init_hw(struct intel_gt *gt);
struct i915_ppgtt *i915_ppgtt_create(struct drm_i915_private *dev_priv);
@@ -694,9 +600,9 @@ int i915_gem_gtt_insert(struct i915_address_space *vm,
u64 start, u64 end, unsigned int flags);
/* Flags used by pin/bind&friends. */
-#define PIN_NONBLOCK BIT_ULL(0)
-#define PIN_NONFAULT BIT_ULL(1)
-#define PIN_NOEVICT BIT_ULL(2)
+#define PIN_NOEVICT BIT_ULL(0)
+#define PIN_NOSEARCH BIT_ULL(1)
+#define PIN_NONBLOCK BIT_ULL(2)
#define PIN_MAPPABLE BIT_ULL(3)
#define PIN_ZONE_4G BIT_ULL(4)
#define PIN_HIGH BIT_ULL(5)
diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.h b/drivers/gpu/drm/i915/i915_gem_render_state.h
deleted file mode 100644
index 112cda8fa1a8..000000000000
--- a/drivers/gpu/drm/i915/i915_gem_render_state.h
+++ /dev/null
@@ -1,31 +0,0 @@
-/*
- * Copyright © 2014 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#ifndef _I915_GEM_RENDER_STATE_H_
-#define _I915_GEM_RENDER_STATE_H_
-
-struct i915_request;
-
-int i915_gem_render_state_emit(struct i915_request *rq);
-
-#endif /* _I915_GEM_RENDER_STATE_H_ */
diff --git a/drivers/gpu/drm/i915/i915_getparam.c b/drivers/gpu/drm/i915/i915_getparam.c
new file mode 100644
index 000000000000..5d9101376a3d
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_getparam.c
@@ -0,0 +1,168 @@
+/*
+ * SPDX-License-Identifier: MIT
+ */
+
+#include "gt/intel_engine_user.h"
+
+#include "i915_drv.h"
+
+int i915_getparam_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_i915_private *i915 = to_i915(dev);
+ const struct sseu_dev_info *sseu = &RUNTIME_INFO(i915)->sseu;
+ drm_i915_getparam_t *param = data;
+ int value;
+
+ switch (param->param) {
+ case I915_PARAM_IRQ_ACTIVE:
+ case I915_PARAM_ALLOW_BATCHBUFFER:
+ case I915_PARAM_LAST_DISPATCH:
+ case I915_PARAM_HAS_EXEC_CONSTANTS:
+ /* Reject all old ums/dri params. */
+ return -ENODEV;
+ case I915_PARAM_CHIPSET_ID:
+ value = i915->drm.pdev->device;
+ break;
+ case I915_PARAM_REVISION:
+ value = i915->drm.pdev->revision;
+ break;
+ case I915_PARAM_NUM_FENCES_AVAIL:
+ value = i915->ggtt.num_fences;
+ break;
+ case I915_PARAM_HAS_OVERLAY:
+ value = !!i915->overlay;
+ break;
+ case I915_PARAM_HAS_BSD:
+ value = !!intel_engine_lookup_user(i915,
+ I915_ENGINE_CLASS_VIDEO, 0);
+ break;
+ case I915_PARAM_HAS_BLT:
+ value = !!intel_engine_lookup_user(i915,
+ I915_ENGINE_CLASS_COPY, 0);
+ break;
+ case I915_PARAM_HAS_VEBOX:
+ value = !!intel_engine_lookup_user(i915,
+ I915_ENGINE_CLASS_VIDEO_ENHANCE, 0);
+ break;
+ case I915_PARAM_HAS_BSD2:
+ value = !!intel_engine_lookup_user(i915,
+ I915_ENGINE_CLASS_VIDEO, 1);
+ break;
+ case I915_PARAM_HAS_LLC:
+ value = HAS_LLC(i915);
+ break;
+ case I915_PARAM_HAS_WT:
+ value = HAS_WT(i915);
+ break;
+ case I915_PARAM_HAS_ALIASING_PPGTT:
+ value = INTEL_PPGTT(i915);
+ break;
+ case I915_PARAM_HAS_SEMAPHORES:
+ value = !!(i915->caps.scheduler & I915_SCHEDULER_CAP_SEMAPHORES);
+ break;
+ case I915_PARAM_HAS_SECURE_BATCHES:
+ value = capable(CAP_SYS_ADMIN);
+ break;
+ case I915_PARAM_CMD_PARSER_VERSION:
+ value = i915_cmd_parser_get_version(i915);
+ break;
+ case I915_PARAM_SUBSLICE_TOTAL:
+ value = intel_sseu_subslice_total(sseu);
+ if (!value)
+ return -ENODEV;
+ break;
+ case I915_PARAM_EU_TOTAL:
+ value = sseu->eu_total;
+ if (!value)
+ return -ENODEV;
+ break;
+ case I915_PARAM_HAS_GPU_RESET:
+ value = i915_modparams.enable_hangcheck &&
+ intel_has_gpu_reset(i915);
+ if (value && intel_has_reset_engine(i915))
+ value = 2;
+ break;
+ case I915_PARAM_HAS_RESOURCE_STREAMER:
+ value = 0;
+ break;
+ case I915_PARAM_HAS_POOLED_EU:
+ value = HAS_POOLED_EU(i915);
+ break;
+ case I915_PARAM_MIN_EU_IN_POOL:
+ value = sseu->min_eu_in_pool;
+ break;
+ case I915_PARAM_HUC_STATUS:
+ value = intel_huc_check_status(&i915->gt.uc.huc);
+ if (value < 0)
+ return value;
+ break;
+ case I915_PARAM_MMAP_GTT_VERSION:
+ /* Though we've started our numbering from 1, and so class all
+ * earlier versions as 0, in effect their value is undefined as
+ * the ioctl will report EINVAL for the unknown param!
+ */
+ value = i915_gem_mmap_gtt_version();
+ break;
+ case I915_PARAM_HAS_SCHEDULER:
+ value = i915->caps.scheduler;
+ break;
+
+ case I915_PARAM_MMAP_VERSION:
+ /* Remember to bump this if the version changes! */
+ case I915_PARAM_HAS_GEM:
+ case I915_PARAM_HAS_PAGEFLIPPING:
+ case I915_PARAM_HAS_EXECBUF2: /* depends on GEM */
+ case I915_PARAM_HAS_RELAXED_FENCING:
+ case I915_PARAM_HAS_COHERENT_RINGS:
+ case I915_PARAM_HAS_RELAXED_DELTA:
+ case I915_PARAM_HAS_GEN7_SOL_RESET:
+ case I915_PARAM_HAS_WAIT_TIMEOUT:
+ case I915_PARAM_HAS_PRIME_VMAP_FLUSH:
+ case I915_PARAM_HAS_PINNED_BATCHES:
+ case I915_PARAM_HAS_EXEC_NO_RELOC:
+ case I915_PARAM_HAS_EXEC_HANDLE_LUT:
+ case I915_PARAM_HAS_COHERENT_PHYS_GTT:
+ case I915_PARAM_HAS_EXEC_SOFTPIN:
+ case I915_PARAM_HAS_EXEC_ASYNC:
+ case I915_PARAM_HAS_EXEC_FENCE:
+ case I915_PARAM_HAS_EXEC_CAPTURE:
+ case I915_PARAM_HAS_EXEC_BATCH_FIRST:
+ case I915_PARAM_HAS_EXEC_FENCE_ARRAY:
+ case I915_PARAM_HAS_EXEC_SUBMIT_FENCE:
+ /* For the time being all of these are always true;
+ * if some supported hardware does not have one of these
+ * features this value needs to be provided from
+ * INTEL_INFO(), a feature macro, or similar.
+ */
+ value = 1;
+ break;
+ case I915_PARAM_HAS_CONTEXT_ISOLATION:
+ value = intel_engines_has_context_isolation(i915);
+ break;
+ case I915_PARAM_SLICE_MASK:
+ value = sseu->slice_mask;
+ if (!value)
+ return -ENODEV;
+ break;
+ case I915_PARAM_SUBSLICE_MASK:
+ value = sseu->subslice_mask[0];
+ if (!value)
+ return -ENODEV;
+ break;
+ case I915_PARAM_CS_TIMESTAMP_FREQUENCY:
+ value = 1000 * RUNTIME_INFO(i915)->cs_timestamp_frequency_khz;
+ break;
+ case I915_PARAM_MMAP_GTT_COHERENT:
+ value = INTEL_INFO(i915)->has_coherent_ggtt;
+ break;
+ default:
+ DRM_DEBUG("Unknown parameter %d\n", param->param);
+ return -EINVAL;
+ }
+
+ if (put_user(value, param->value))
+ return -EFAULT;
+
+ return 0;
+}
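From userspace this handler is reached through DRM_IOCTL_I915_GETPARAM with a drm_i915_getparam_t carrying the parameter id and a pointer for the result. A minimal query sketch (assuming the uapi i915_drm.h header and a primary node at /dev/dri/card0; error handling elided):

	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <i915_drm.h>

	int main(void)
	{
		int fd = open("/dev/dri/card0", O_RDWR);
		int value = 0;
		drm_i915_getparam_t gp = {
			.param = I915_PARAM_CHIPSET_ID,
			.value = &value,
		};

		if (fd >= 0 && ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp) == 0)
			printf("PCI device id: 0x%04x\n", value);
		return 0;
	}
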
diff --git a/drivers/gpu/drm/i915/i915_globals.c b/drivers/gpu/drm/i915/i915_globals.c
index 2d5fcba98841..be127cd28931 100644
--- a/drivers/gpu/drm/i915/i915_globals.c
+++ b/drivers/gpu/drm/i915/i915_globals.c
@@ -62,6 +62,7 @@ static void __i915_globals_cleanup(void)
static __initconst int (* const initfn[])(void) = {
i915_global_active_init,
+ i915_global_buddy_init,
i915_global_context_init,
i915_global_gem_context_init,
i915_global_objects_init,
diff --git a/drivers/gpu/drm/i915/i915_globals.h b/drivers/gpu/drm/i915/i915_globals.h
index 04c1ce107fc0..b2f5cd9b9b1a 100644
--- a/drivers/gpu/drm/i915/i915_globals.h
+++ b/drivers/gpu/drm/i915/i915_globals.h
@@ -7,6 +7,8 @@
#ifndef _I915_GLOBALS_H_
#define _I915_GLOBALS_H_
+#include <linux/types.h>
+
typedef void (*i915_global_func_t)(void);
struct i915_global {
@@ -25,6 +27,7 @@ void i915_globals_exit(void);
/* constructors */
int i915_global_active_init(void);
+int i915_global_buddy_init(void);
int i915_global_context_init(void);
int i915_global_gem_context_init(void);
int i915_global_objects_init(void);
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 8bc76fcff70d..e284bd76fa86 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -29,8 +29,8 @@
#include <linux/ascii85.h>
#include <linux/nmi.h>
+#include <linux/pagevec.h>
#include <linux/scatterlist.h>
-#include <linux/stop_machine.h>
#include <linux/utsname.h>
#include <linux/zlib.h>
@@ -43,49 +43,12 @@
#include "i915_drv.h"
#include "i915_gpu_error.h"
+#include "i915_memcpy.h"
#include "i915_scatterlist.h"
#include "intel_csr.h"
-static inline const struct intel_engine_cs *
-engine_lookup(const struct drm_i915_private *i915, unsigned int id)
-{
- if (id >= I915_NUM_ENGINES)
- return NULL;
-
- return i915->engine[id];
-}
-
-static inline const char *
-__engine_name(const struct intel_engine_cs *engine)
-{
- return engine ? engine->name : "";
-}
-
-static const char *
-engine_name(const struct drm_i915_private *i915, unsigned int id)
-{
- return __engine_name(engine_lookup(i915, id));
-}
-
-static const char *tiling_flag(int tiling)
-{
- switch (tiling) {
- default:
- case I915_TILING_NONE: return "";
- case I915_TILING_X: return " X";
- case I915_TILING_Y: return " Y";
- }
-}
-
-static const char *dirty_flag(int dirty)
-{
- return dirty ? " dirty" : "";
-}
-
-static const char *purgeable_flag(int purgeable)
-{
- return purgeable ? " purgeable" : "";
-}
+#define ALLOW_FAIL (GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN)
+#define ATOMIC_MAYFAIL (GFP_ATOMIC | __GFP_NOWARN)
static void __sg_set_buf(struct scatterlist *sg,
void *addr, unsigned int len, loff_t it)
@@ -114,7 +77,7 @@ static bool __i915_error_grow(struct drm_i915_error_state_buf *e, size_t len)
if (e->cur == e->end) {
struct scatterlist *sgl;
- sgl = (typeof(sgl))__get_free_page(GFP_KERNEL);
+ sgl = (typeof(sgl))__get_free_page(ALLOW_FAIL);
if (!sgl) {
e->err = -ENOMEM;
return false;
@@ -134,7 +97,7 @@ static bool __i915_error_grow(struct drm_i915_error_state_buf *e, size_t len)
}
e->size = ALIGN(len + 1, SZ_64K);
- e->buf = kmalloc(e->size, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
+ e->buf = kmalloc(e->size, ALLOW_FAIL);
if (!e->buf) {
e->size = PAGE_ALIGN(len + 1);
e->buf = kmalloc(e->size, GFP_KERNEL);
@@ -211,47 +174,115 @@ i915_error_printer(struct drm_i915_error_state_buf *e)
return p;
}
+/* A single-threaded page allocator with a reserved stash for emergencies. */
+static void pool_fini(struct pagevec *pv)
+{
+ pagevec_release(pv);
+}
+
+static int pool_refill(struct pagevec *pv, gfp_t gfp)
+{
+ while (pagevec_space(pv)) {
+ struct page *p;
+
+ p = alloc_page(gfp);
+ if (!p)
+ return -ENOMEM;
+
+ pagevec_add(pv, p);
+ }
+
+ return 0;
+}
+
+static int pool_init(struct pagevec *pv, gfp_t gfp)
+{
+ int err;
+
+ pagevec_init(pv);
+
+ err = pool_refill(pv, gfp);
+ if (err)
+ pool_fini(pv);
+
+ return err;
+}
+
+static void *pool_alloc(struct pagevec *pv, gfp_t gfp)
+{
+ struct page *p;
+
+ p = alloc_page(gfp);
+ if (!p && pagevec_count(pv))
+ p = pv->pages[--pv->nr];
+
+ return p ? page_address(p) : NULL;
+}
+
+static void pool_free(struct pagevec *pv, void *addr)
+{
+ struct page *p = virt_to_page(addr);
+
+ if (pagevec_space(pv))
+ pagevec_add(pv, p);
+ else
+ __free_page(p);
+}
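The intended lifecycle of this pool: pre-fill the stash while sleeping allocations are still allowed, dip into it when a later allocation fails in a constrained context, and return pages to the stash on free. Roughly (illustrative call order, using the gfp flags defined above):

	struct pagevec pool;
	void *buf;

	if (pool_init(&pool, ALLOW_FAIL))	/* pre-fill the reserve */
		return -ENOMEM;

	buf = pool_alloc(&pool, ATOMIC_MAYFAIL);	/* may draw from the reserve */
	if (buf)
		pool_free(&pool, buf);		/* page goes back to the stash */

	pool_fini(&pool);			/* release whatever remains */
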
+
#ifdef CONFIG_DRM_I915_COMPRESS_ERROR
struct compress {
+ struct pagevec pool;
struct z_stream_s zstream;
void *tmp;
};
static bool compress_init(struct compress *c)
{
- struct z_stream_s *zstream = memset(&c->zstream, 0, sizeof(c->zstream));
+ struct z_stream_s *zstream = &c->zstream;
- zstream->workspace =
- kmalloc(zlib_deflate_workspacesize(MAX_WBITS, MAX_MEM_LEVEL),
- GFP_ATOMIC | __GFP_NOWARN);
- if (!zstream->workspace)
+ if (pool_init(&c->pool, ALLOW_FAIL))
return false;
- if (zlib_deflateInit(zstream, Z_DEFAULT_COMPRESSION) != Z_OK) {
- kfree(zstream->workspace);
+ zstream->workspace =
+ kmalloc(zlib_deflate_workspacesize(MAX_WBITS, MAX_MEM_LEVEL),
+ ALLOW_FAIL);
+ if (!zstream->workspace) {
+ pool_fini(&c->pool);
return false;
}
c->tmp = NULL;
if (i915_has_memcpy_from_wc())
- c->tmp = (void *)__get_free_page(GFP_ATOMIC | __GFP_NOWARN);
+ c->tmp = pool_alloc(&c->pool, ALLOW_FAIL);
return true;
}
-static void *compress_next_page(struct drm_i915_error_object *dst)
+static bool compress_start(struct compress *c)
{
- unsigned long page;
+ struct z_stream_s *zstream = &c->zstream;
+ void *workspace = zstream->workspace;
+
+ memset(zstream, 0, sizeof(*zstream));
+ zstream->workspace = workspace;
+
+ return zlib_deflateInit(zstream, Z_DEFAULT_COMPRESSION) == Z_OK;
+}
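With this init/start split, the zlib workspace is allocated once in compress_init() and then reused: compress_start() zeroes the zstream, restores the workspace pointer and re-runs zlib_deflateInit() for each object captured. The expected call order is roughly:

	struct compress c;

	if (!compress_init(&c))		/* once: pool + deflate workspace */
		return;

	if (compress_start(&c)) {	/* per object: reset zstream state */
		/* ... compress_page(&c, src, dst) for each page ... */
		compress_finish(&c);	/* zlib_deflateEnd() */
	}

	compress_fini(&c);		/* free workspace, drain pool */
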
+
+static void *compress_next_page(struct compress *c,
+ struct drm_i915_error_object *dst)
+{
+ void *page;
if (dst->page_count >= dst->num_pages)
return ERR_PTR(-ENOSPC);
- page = __get_free_page(GFP_ATOMIC | __GFP_NOWARN);
+ page = pool_alloc(&c->pool, ALLOW_FAIL);
if (!page)
return ERR_PTR(-ENOMEM);
- return dst->pages[dst->page_count++] = (void *)page;
+ return dst->pages[dst->page_count++] = page;
}
static int compress_page(struct compress *c,
@@ -267,7 +298,7 @@ static int compress_page(struct compress *c,
do {
if (zstream->avail_out == 0) {
- zstream->next_out = compress_next_page(dst);
+ zstream->next_out = compress_next_page(c, dst);
if (IS_ERR(zstream->next_out))
return PTR_ERR(zstream->next_out);
@@ -276,8 +307,6 @@ static int compress_page(struct compress *c,
if (zlib_deflate(zstream, Z_NO_FLUSH) != Z_OK)
return -EIO;
-
- touch_nmi_watchdog();
} while (zstream->avail_in);
/* Fallback to uncompressed if we increase size? */
@@ -295,7 +324,7 @@ static int compress_flush(struct compress *c,
do {
switch (zlib_deflate(zstream, Z_FINISH)) {
case Z_OK: /* more space requested */
- zstream->next_out = compress_next_page(dst);
+ zstream->next_out = compress_next_page(c, dst);
if (IS_ERR(zstream->next_out))
return PTR_ERR(zstream->next_out);
@@ -316,15 +345,17 @@ end:
return 0;
}
-static void compress_fini(struct compress *c,
- struct drm_i915_error_object *dst)
+static void compress_finish(struct compress *c)
{
- struct z_stream_s *zstream = &c->zstream;
+ zlib_deflateEnd(&c->zstream);
+}
- zlib_deflateEnd(zstream);
- kfree(zstream->workspace);
+static void compress_fini(struct compress *c)
+{
+ kfree(c->zstream.workspace);
if (c->tmp)
- free_page((unsigned long)c->tmp);
+ pool_free(&c->pool, c->tmp);
+ pool_fini(&c->pool);
}
static void err_compression_marker(struct drm_i915_error_state_buf *m)
@@ -335,10 +366,16 @@ static void err_compression_marker(struct drm_i915_error_state_buf *m)
#else
struct compress {
+ struct pagevec pool;
};
static bool compress_init(struct compress *c)
{
+ return pool_init(&c->pool, ALLOW_FAIL) == 0;
+}
+
+static bool compress_start(struct compress *c)
+{
return true;
}
@@ -346,14 +383,12 @@ static int compress_page(struct compress *c,
void *src,
struct drm_i915_error_object *dst)
{
- unsigned long page;
void *ptr;
- page = __get_free_page(GFP_ATOMIC | __GFP_NOWARN);
- if (!page)
+ ptr = pool_alloc(&c->pool, ALLOW_FAIL);
+ if (!ptr)
return -ENOMEM;
- ptr = (void *)page;
if (!i915_memcpy_from_wc(ptr, src, PAGE_SIZE))
memcpy(ptr, src, PAGE_SIZE);
dst->pages[dst->page_count++] = ptr;
@@ -367,9 +402,13 @@ static int compress_flush(struct compress *c,
return 0;
}
-static void compress_fini(struct compress *c,
- struct drm_i915_error_object *dst)
+static void compress_finish(struct compress *c)
+{
+}
+
+static void compress_fini(struct compress *c)
{
+ pool_fini(&c->pool);
}
static void err_compression_marker(struct drm_i915_error_state_buf *m)
@@ -379,36 +418,6 @@ static void err_compression_marker(struct drm_i915_error_state_buf *m)
#endif
-static void print_error_buffers(struct drm_i915_error_state_buf *m,
- const char *name,
- struct drm_i915_error_buffer *err,
- int count)
-{
- err_printf(m, "%s [%d]:\n", name, count);
-
- while (count--) {
- err_printf(m, " %08x_%08x %8u %02x %02x",
- upper_32_bits(err->gtt_offset),
- lower_32_bits(err->gtt_offset),
- err->size,
- err->read_domains,
- err->write_domain);
- err_puts(m, tiling_flag(err->tiling));
- err_puts(m, dirty_flag(err->dirty));
- err_puts(m, purgeable_flag(err->purgeable));
- err_puts(m, err->userptr ? " userptr" : "");
- err_puts(m, i915_cache_level_str(m->i915, err->cache_level));
-
- if (err->name)
- err_printf(m, " (name: %d)", err->name);
- if (err->fence_reg != I915_FENCE_REG_NONE)
- err_printf(m, " (fence: %d)", err->fence_reg);
-
- err_puts(m, "\n");
- err++;
- }
-}
-
static void error_print_instdone(struct drm_i915_error_state_buf *m,
const struct drm_i915_error_engine *ee)
{
@@ -418,7 +427,7 @@ static void error_print_instdone(struct drm_i915_error_state_buf *m,
err_printf(m, " INSTDONE: 0x%08x\n",
ee->instdone.instdone);
- if (ee->engine_id != RCS0 || INTEL_GEN(m->i915) <= 3)
+ if (ee->engine->class != RENDER_CLASS || INTEL_GEN(m->i915) <= 3)
return;
err_printf(m, " SC_INSTDONE: 0x%08x\n",
@@ -472,8 +481,7 @@ static void error_print_engine(struct drm_i915_error_state_buf *m,
{
int n;
- err_printf(m, "%s command stream:\n",
- engine_name(m->i915, ee->engine_id));
+ err_printf(m, "%s command stream:\n", ee->engine->name);
err_printf(m, " IDLE?: %s\n", yesno(ee->idle));
err_printf(m, " START: 0x%08x\n", ee->start);
err_printf(m, " HEAD: 0x%08x [0x%08x]\n", ee->head, ee->rq_head);
@@ -549,9 +557,9 @@ void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...)
}
static void print_error_obj(struct drm_i915_error_state_buf *m,
- struct intel_engine_cs *engine,
+ const struct intel_engine_cs *engine,
const char *name,
- struct drm_i915_error_object *obj)
+ const struct drm_i915_error_object *obj)
{
char out[ASCII85_BUFSZ];
int page;
@@ -620,7 +628,7 @@ static void err_print_uc(struct drm_i915_error_state_buf *m,
const struct i915_gpu_state *error =
container_of(error_uc, typeof(*error), uc);
- if (!error->device_info.has_guc)
+ if (!error->device_info.has_gt_uc)
return;
intel_uc_fw_dump(&error_uc->guc_fw, &p);
@@ -648,7 +656,7 @@ static void err_free_sgl(struct scatterlist *sgl)
static void __err_print_to_sgl(struct drm_i915_error_state_buf *m,
struct i915_gpu_state *error)
{
- struct drm_i915_error_object *obj;
+ const struct drm_i915_error_engine *ee;
struct timespec64 ts;
int i, j;
@@ -657,6 +665,7 @@ static void __err_print_to_sgl(struct drm_i915_error_state_buf *m,
err_printf(m, "Kernel: %s %s\n",
init_utsname()->release,
init_utsname()->machine);
+ err_printf(m, "Driver: %s\n", DRIVER_DATE);
ts = ktime_to_timespec64(error->time);
err_printf(m, "Time: %lld s %ld us\n",
(s64)ts.tv_sec, ts.tv_nsec / NSEC_PER_USEC);
@@ -672,15 +681,12 @@ static void __err_print_to_sgl(struct drm_i915_error_state_buf *m,
jiffies_to_msecs(jiffies - error->capture),
jiffies_to_msecs(error->capture - error->epoch));
- for (i = 0; i < ARRAY_SIZE(error->engine); i++) {
- if (!error->engine[i].context.pid)
- continue;
-
+ for (ee = error->engine; ee; ee = ee->next)
err_printf(m, "Active process (on ring %s): %s [%d]\n",
- engine_name(m->i915, i),
- error->engine[i].context.comm,
- error->engine[i].context.pid);
- }
+ ee->engine->name,
+ ee->context.comm,
+ ee->context.pid);
+
err_printf(m, "Reset count: %u\n", error->reset_count);
err_printf(m, "Suspend count: %u\n", error->suspend_count);
err_printf(m, "Platform: %s\n", intel_platform_name(error->device_info.platform));
@@ -716,57 +722,27 @@ static void __err_print_to_sgl(struct drm_i915_error_state_buf *m,
for (i = 0; i < error->nfence; i++)
err_printf(m, " fence[%d] = %08llx\n", i, error->fence[i]);
- if (INTEL_GEN(m->i915) >= 6) {
+ if (IS_GEN_RANGE(m->i915, 6, 11)) {
err_printf(m, "ERROR: 0x%08x\n", error->error);
-
- if (INTEL_GEN(m->i915) >= 8)
- err_printf(m, "FAULT_TLB_DATA: 0x%08x 0x%08x\n",
- error->fault_data1, error->fault_data0);
-
err_printf(m, "DONE_REG: 0x%08x\n", error->done_reg);
}
+ if (INTEL_GEN(m->i915) >= 8)
+ err_printf(m, "FAULT_TLB_DATA: 0x%08x 0x%08x\n",
+ error->fault_data1, error->fault_data0);
+
if (IS_GEN(m->i915, 7))
err_printf(m, "ERR_INT: 0x%08x\n", error->err_int);
- for (i = 0; i < ARRAY_SIZE(error->engine); i++) {
- if (error->engine[i].engine_id != -1)
- error_print_engine(m, &error->engine[i], error->epoch);
- }
-
- for (i = 0; i < ARRAY_SIZE(error->active_vm); i++) {
- char buf[128];
- int len, first = 1;
-
- if (!error->active_vm[i])
- break;
-
- len = scnprintf(buf, sizeof(buf), "Active (");
- for (j = 0; j < ARRAY_SIZE(error->engine); j++) {
- if (error->engine[j].vm != error->active_vm[i])
- continue;
-
- len += scnprintf(buf + len, sizeof(buf), "%s%s",
- first ? "" : ", ",
- m->i915->engine[j]->name);
- first = 0;
- }
- scnprintf(buf + len, sizeof(buf), ")");
- print_error_buffers(m, buf,
- error->active_bo[i],
- error->active_bo_count[i]);
- }
+ for (ee = error->engine; ee; ee = ee->next)
+ error_print_engine(m, ee, error->epoch);
- print_error_buffers(m, "Pinned (global)",
- error->pinned_bo,
- error->pinned_bo_count);
-
- for (i = 0; i < ARRAY_SIZE(error->engine); i++) {
- const struct drm_i915_error_engine *ee = &error->engine[i];
+ for (ee = error->engine; ee; ee = ee->next) {
+ const struct drm_i915_error_object *obj;
obj = ee->batchbuffer;
if (obj) {
- err_puts(m, m->i915->engine[i]->name);
+ err_puts(m, ee->engine->name);
if (ee->context.pid)
err_printf(m, " (submitted by %s [%d])",
ee->context.comm,
@@ -774,16 +750,15 @@ static void __err_print_to_sgl(struct drm_i915_error_state_buf *m,
err_printf(m, " --- gtt_offset = 0x%08x %08x\n",
upper_32_bits(obj->gtt_offset),
lower_32_bits(obj->gtt_offset));
- print_error_obj(m, m->i915->engine[i], NULL, obj);
+ print_error_obj(m, ee->engine, NULL, obj);
}
for (j = 0; j < ee->user_bo_count; j++)
- print_error_obj(m, m->i915->engine[i],
- "user", ee->user_bo[j]);
+ print_error_obj(m, ee->engine, "user", ee->user_bo[j]);
if (ee->num_requests) {
err_printf(m, "%s --- %d requests\n",
- m->i915->engine[i]->name,
+ ee->engine->name,
ee->num_requests);
for (j = 0; j < ee->num_requests; j++)
error_print_request(m, " ",
@@ -791,22 +766,13 @@ static void __err_print_to_sgl(struct drm_i915_error_state_buf *m,
error->epoch);
}
- print_error_obj(m, m->i915->engine[i],
- "ringbuffer", ee->ringbuffer);
-
- print_error_obj(m, m->i915->engine[i],
- "HW Status", ee->hws_page);
-
- print_error_obj(m, m->i915->engine[i],
- "HW context", ee->ctx);
-
- print_error_obj(m, m->i915->engine[i],
- "WA context", ee->wa_ctx);
-
- print_error_obj(m, m->i915->engine[i],
+ print_error_obj(m, ee->engine, "ringbuffer", ee->ringbuffer);
+ print_error_obj(m, ee->engine, "HW Status", ee->hws_page);
+ print_error_obj(m, ee->engine, "HW context", ee->ctx);
+ print_error_obj(m, ee->engine, "WA context", ee->wa_ctx);
+ print_error_obj(m, ee->engine,
"WA batchbuffer", ee->wa_batchbuffer);
-
- print_error_obj(m, m->i915->engine[i],
+ print_error_obj(m, ee->engine,
"NULL context", ee->default_state);
}
@@ -955,13 +921,15 @@ void __i915_gpu_state_free(struct kref *error_ref)
{
struct i915_gpu_state *error =
container_of(error_ref, typeof(*error), ref);
- long i, j;
+ long i;
- for (i = 0; i < ARRAY_SIZE(error->engine); i++) {
- struct drm_i915_error_engine *ee = &error->engine[i];
+ while (error->engine) {
+ struct drm_i915_error_engine *ee = error->engine;
- for (j = 0; j < ee->user_bo_count; j++)
- i915_error_object_free(ee->user_bo[j]);
+ error->engine = ee->next;
+
+ for (i = 0; i < ee->user_bo_count; i++)
+ i915_error_object_free(ee->user_bo[i]);
kfree(ee->user_bo);
i915_error_object_free(ee->batchbuffer);
@@ -972,12 +940,9 @@ void __i915_gpu_state_free(struct kref *error_ref)
i915_error_object_free(ee->wa_ctx);
kfree(ee->requests);
+ kfree(ee);
}
- for (i = 0; i < ARRAY_SIZE(error->active_bo); i++)
- kfree(error->active_bo[i]);
- kfree(error->pinned_bo);
-
kfree(error->overlay);
kfree(error->display);
@@ -990,108 +955,63 @@ void __i915_gpu_state_free(struct kref *error_ref)
static struct drm_i915_error_object *
i915_error_object_create(struct drm_i915_private *i915,
- struct i915_vma *vma)
+ struct i915_vma *vma,
+ struct compress *compress)
{
struct i915_ggtt *ggtt = &i915->ggtt;
const u64 slot = ggtt->error_capture.start;
struct drm_i915_error_object *dst;
- struct compress compress;
unsigned long num_pages;
struct sgt_iter iter;
dma_addr_t dma;
int ret;
+ might_sleep();
+
if (!vma || !vma->pages)
return NULL;
num_pages = min_t(u64, vma->size, vma->obj->base.size) >> PAGE_SHIFT;
num_pages = DIV_ROUND_UP(10 * num_pages, 8); /* worst-case zlib growth */
- dst = kmalloc(sizeof(*dst) + num_pages * sizeof(u32 *),
- GFP_ATOMIC | __GFP_NOWARN);
+ dst = kmalloc(sizeof(*dst) + num_pages * sizeof(u32 *), ALLOW_FAIL);
if (!dst)
return NULL;
+ if (!compress_start(compress)) {
+ kfree(dst);
+ return NULL;
+ }
+
dst->gtt_offset = vma->node.start;
dst->gtt_size = vma->node.size;
dst->num_pages = num_pages;
dst->page_count = 0;
dst->unused = 0;
- if (!compress_init(&compress)) {
- kfree(dst);
- return NULL;
- }
-
ret = -EINVAL;
for_each_sgt_dma(dma, iter, vma->pages) {
void __iomem *s;
ggtt->vm.insert_page(&ggtt->vm, dma, slot, I915_CACHE_NONE, 0);
- s = io_mapping_map_atomic_wc(&ggtt->iomap, slot);
- ret = compress_page(&compress, (void __force *)s, dst);
- io_mapping_unmap_atomic(s);
+ s = io_mapping_map_wc(&ggtt->iomap, slot, PAGE_SIZE);
+ ret = compress_page(compress, (void __force *)s, dst);
+ io_mapping_unmap(s);
if (ret)
break;
}
- if (ret || compress_flush(&compress, dst)) {
+ if (ret || compress_flush(compress, dst)) {
while (dst->page_count--)
- free_page((unsigned long)dst->pages[dst->page_count]);
+ pool_free(&compress->pool, dst->pages[dst->page_count]);
kfree(dst);
dst = NULL;
}
+ compress_finish(compress);
- compress_fini(&compress, dst);
return dst;
}
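
i915_error_object_create now receives the compressor from its caller, asserts might_sleep(), and swaps the atomic WC mapping for io_mapping_map_wc(): capture no longer runs under stop_machine(), so each page can be mapped, compressed, and unmapped at leisure. A standalone sketch of that per-page loop with stand-in helpers (none of these are the kernel's functions):

	#include <stdlib.h>
	#include <string.h>

	#define PAGE_SIZE 4096

	/* Stand-ins for mapping a source page through a reserved window. */
	static void *map_window(const void *page) { return (void *)page; }
	static void unmap_window(void *s) { (void)s; }

	/* Stand-in for the zlib/memcpy compressor in the patch. */
	static int compress_page(void *dst, const void *src)
	{
		memcpy(dst, src, PAGE_SIZE);
		return 0;
	}

	static int capture_pages(const void **pages, int n, char *dst)
	{
		for (int i = 0; i < n; i++) {
			void *s = map_window(pages[i]);	/* may sleep now */
			int ret = compress_page(dst + (size_t)i * PAGE_SIZE, s);

			unmap_window(s);
			if (ret)
				return ret;
		}
		return 0;
	}

	int main(void)
	{
		static char src[2][PAGE_SIZE], dst[2][PAGE_SIZE];
		const void *pages[2] = { src[0], src[1] };

		return capture_pages(pages, 2, dst[0]);
	}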
-static void capture_bo(struct drm_i915_error_buffer *err,
- struct i915_vma *vma)
-{
- struct drm_i915_gem_object *obj = vma->obj;
-
- err->size = obj->base.size;
- err->name = obj->base.name;
-
- err->gtt_offset = vma->node.start;
- err->read_domains = obj->read_domains;
- err->write_domain = obj->write_domain;
- err->fence_reg = vma->fence ? vma->fence->id : -1;
- err->tiling = i915_gem_object_get_tiling(obj);
- err->dirty = obj->mm.dirty;
- err->purgeable = obj->mm.madv != I915_MADV_WILLNEED;
- err->userptr = obj->userptr.mm != NULL;
- err->cache_level = obj->cache_level;
-}
-
-static u32 capture_error_bo(struct drm_i915_error_buffer *err,
- int count, struct list_head *head,
- unsigned int flags)
-#define ACTIVE_ONLY BIT(0)
-#define PINNED_ONLY BIT(1)
-{
- struct i915_vma *vma;
- int i = 0;
-
- list_for_each_entry(vma, head, vm_link) {
- if (!vma->obj)
- continue;
-
- if (flags & ACTIVE_ONLY && !i915_vma_is_active(vma))
- continue;
-
- if (flags & PINNED_ONLY && !i915_vma_is_pinned(vma))
- continue;
-
- capture_bo(err++, vma);
- if (++i == count)
- break;
- }
-
- return i;
-}
-
/*
 * Generate a semi-unique error code. The code is not meant to have meaning; the
* code's only purpose is to try to prevent false duplicated bug reports by
@@ -1102,23 +1022,17 @@ static u32 capture_error_bo(struct drm_i915_error_buffer *err,
*
* It's only a small step better than a random number in its current form.
*/
-static u32 i915_error_generate_code(struct i915_gpu_state *error,
- intel_engine_mask_t engine_mask)
+static u32 i915_error_generate_code(struct i915_gpu_state *error)
{
+ const struct drm_i915_error_engine *ee = error->engine;
+
/*
* IPEHR would be an ideal way to detect errors, as it's the gross
 * measure of "the command that hung." However, it has some very common
 * synchronization commands which almost always appear in cases that are
 * strictly a client bug. Use instdone to differentiate some of those.
*/
- if (engine_mask) {
- struct drm_i915_error_engine *ee =
- &error->engine[ffs(engine_mask)];
-
- return ee->ipehr ^ ee->instdone.instdone;
- }
-
- return 0;
+ return ee ? ee->ipehr ^ ee->instdone.instdone : 0;
}
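
With the engine array gone, the "ecode" collapses to IPEHR XOR INSTDONE of the first engine on the capture list, or zero when nothing was recorded. A tiny standalone illustration (field names mirror the diff; the register values are made up):

	#include <stdint.h>
	#include <stdio.h>

	struct error_engine {
		uint32_t ipehr;
		uint32_t instdone;
		struct error_engine *next;
	};

	static uint32_t generate_code(const struct error_engine *ee)
	{
		/* Same reduction as the patch: first engine or 0. */
		return ee ? ee->ipehr ^ ee->instdone : 0;
	}

	int main(void)
	{
		struct error_engine ee = {
			.ipehr = 0x0b160001,	/* invented sample values */
			.instdone = 0xfffffffe,
		};

		printf("ecode 0x%08x\n", generate_code(&ee));
		return 0;
	}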
static void gem_record_fences(struct i915_gpu_state *error)
@@ -1153,7 +1067,10 @@ static void error_record_engine_registers(struct i915_gpu_state *error,
if (INTEL_GEN(dev_priv) >= 6) {
ee->rc_psmi = ENGINE_READ(engine, RING_PSMI_CTL);
- if (INTEL_GEN(dev_priv) >= 8)
+
+ if (INTEL_GEN(dev_priv) >= 12)
+ ee->fault_reg = I915_READ(GEN12_RING_FAULT_REG);
+ else if (INTEL_GEN(dev_priv) >= 8)
ee->fault_reg = I915_READ(GEN8_RING_FAULT_REG);
else
ee->fault_reg = GEN6_RING_FAULT_REG_READ(engine);
@@ -1249,10 +1166,10 @@ static void error_record_engine_registers(struct i915_gpu_state *error,
}
}
-static void record_request(struct i915_request *request,
+static void record_request(const struct i915_request *request,
struct drm_i915_error_request *erq)
{
- struct i915_gem_context *ctx = request->gem_context;
+ const struct i915_gem_context *ctx = request->gem_context;
erq->flags = request->fence.flags;
erq->context = request->fence.context;
@@ -1282,7 +1199,7 @@ static void engine_record_requests(struct intel_engine_cs *engine,
if (!count)
return;
- ee->requests = kcalloc(count, sizeof(*ee->requests), GFP_ATOMIC);
+ ee->requests = kcalloc(count, sizeof(*ee->requests), ATOMIC_MAYFAIL);
if (!ee->requests)
return;
@@ -1316,27 +1233,24 @@ static void engine_record_requests(struct intel_engine_cs *engine,
ee->num_requests = count;
}
-static void error_record_engine_execlists(struct intel_engine_cs *engine,
+static void error_record_engine_execlists(const struct intel_engine_cs *engine,
struct drm_i915_error_engine *ee)
{
const struct intel_engine_execlists * const execlists = &engine->execlists;
- unsigned int n;
+ struct i915_request * const *port = execlists->active;
+ unsigned int n = 0;
- for (n = 0; n < execlists_num_ports(execlists); n++) {
- struct i915_request *rq = port_request(&execlists->port[n]);
-
- if (!rq)
- break;
-
- record_request(rq, &ee->execlist[n]);
- }
+ while (*port)
+ record_request(*port++, &ee->execlist[n++]);
ee->num_ports = n;
}
-static void record_context(struct drm_i915_error_context *e,
- struct i915_gem_context *ctx)
+static bool record_context(struct drm_i915_error_context *e,
+ const struct i915_request *rq)
{
+ const struct i915_gem_context *ctx = rq->gem_context;
+
if (ctx->pid) {
struct task_struct *task;
@@ -1353,10 +1267,46 @@ static void record_context(struct drm_i915_error_context *e,
e->sched_attr = ctx->sched;
e->guilty = atomic_read(&ctx->guilty_count);
e->active = atomic_read(&ctx->active_count);
+
+ return i915_gem_context_no_error_capture(ctx);
}
-static void request_record_user_bo(struct i915_request *request,
- struct drm_i915_error_engine *ee)
+struct capture_vma {
+ struct capture_vma *next;
+ void **slot;
+};
+
+static struct capture_vma *
+capture_vma(struct capture_vma *next,
+ struct i915_vma *vma,
+ struct drm_i915_error_object **out)
+{
+ struct capture_vma *c;
+
+ *out = NULL;
+ if (!vma)
+ return next;
+
+ c = kmalloc(sizeof(*c), ATOMIC_MAYFAIL);
+ if (!c)
+ return next;
+
+ if (!i915_active_trygrab(&vma->active)) {
+ kfree(c);
+ return next;
+ }
+
+ c->slot = (void **)out;
+ *c->slot = i915_vma_get(vma);
+
+ c->next = next;
+ return c;
+}
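+
The capture_vma helper is the heart of the new two-phase scheme: while the engine lock is held it only takes references (i915_vma_get plus i915_active_trygrab) and pushes a node onto a singly linked list; the slow compressed copy is performed afterwards, outside the lock, when the list is drained in gem_record_rings. The defer-then-drain shape in a userspace sketch (all names hypothetical):

	#include <stdlib.h>

	struct deferred {
		struct deferred *next;
		void **slot;	/* where the eventual copy is stored */
		void *source;	/* object referenced under the lock */
	};

	/* Phase 1, "under the lock": record only, no heavy work. */
	static struct deferred *defer_copy(struct deferred *head,
					   void *source, void **slot)
	{
		struct deferred *d = malloc(sizeof(*d));

		*slot = NULL;
		if (!d)
			return head;	/* best effort: skip this object */

		d->slot = slot;
		d->source = source;
		d->next = head;
		return d;
	}

	/* Stand-in for the compressed snapshot. */
	static void *heavy_copy(void *source)
	{
		return source;
	}

	/* Phase 2, after unlocking: do the copies, free the nodes. */
	static void drain(struct deferred *head)
	{
		while (head) {
			struct deferred *d = head;

			*d->slot = heavy_copy(d->source);
			head = d->next;
			free(d);
		}
	}

	int main(void)
	{
		int obj = 42;
		void *copy = NULL;

		drain(defer_copy(NULL, &obj, &copy));
		return copy == &obj ? 0 : 1;
	}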
+
+static struct capture_vma *
+request_record_user_bo(struct i915_request *request,
+ struct drm_i915_error_engine *ee,
+ struct capture_vma *capture)
{
struct i915_capture_list *c;
struct drm_i915_error_object **bo;
@@ -1366,33 +1316,34 @@ static void request_record_user_bo(struct i915_request *request,
for (c = request->capture_list; c; c = c->next)
max++;
if (!max)
- return;
+ return capture;
- bo = kmalloc_array(max, sizeof(*bo), GFP_ATOMIC);
+ bo = kmalloc_array(max, sizeof(*bo), ATOMIC_MAYFAIL);
if (!bo) {
/* If we can't capture everything, try to capture something. */
max = min_t(long, max, PAGE_SIZE / sizeof(*bo));
- bo = kmalloc_array(max, sizeof(*bo), GFP_ATOMIC);
+ bo = kmalloc_array(max, sizeof(*bo), ATOMIC_MAYFAIL);
}
if (!bo)
- return;
+ return capture;
count = 0;
for (c = request->capture_list; c; c = c->next) {
- bo[count] = i915_error_object_create(request->i915, c->vma);
- if (!bo[count])
- break;
+ capture = capture_vma(capture, c->vma, &bo[count]);
if (++count == max)
break;
}
ee->user_bo = bo;
ee->user_bo_count = count;
+
+ return capture;
}
static struct drm_i915_error_object *
capture_object(struct drm_i915_private *dev_priv,
- struct drm_i915_gem_object *obj)
+ struct drm_i915_gem_object *obj,
+ struct compress *compress)
{
if (obj && i915_gem_object_has_pages(obj)) {
struct i915_vma fake = {
@@ -1402,180 +1353,140 @@ capture_object(struct drm_i915_private *dev_priv,
.obj = obj,
};
- return i915_error_object_create(dev_priv, &fake);
+ return i915_error_object_create(dev_priv, &fake, compress);
} else {
return NULL;
}
}
-static void gem_record_rings(struct i915_gpu_state *error)
+static void
+gem_record_rings(struct i915_gpu_state *error, struct compress *compress)
{
struct drm_i915_private *i915 = error->i915;
- struct i915_ggtt *ggtt = &i915->ggtt;
- int i;
+ struct intel_engine_cs *engine;
+ struct drm_i915_error_engine *ee;
- for (i = 0; i < I915_NUM_ENGINES; i++) {
- struct intel_engine_cs *engine = i915->engine[i];
- struct drm_i915_error_engine *ee = &error->engine[i];
+ ee = kzalloc(sizeof(*ee), GFP_KERNEL);
+ if (!ee)
+ return;
+
+ for_each_uabi_engine(engine, i915) {
+ struct capture_vma *capture = NULL;
struct i915_request *request;
unsigned long flags;
- ee->engine_id = -1;
-
- if (!engine)
- continue;
-
- ee->engine_id = i;
-
- error_record_engine_registers(error, engine, ee);
- error_record_engine_execlists(engine, ee);
+	/* Refill our page pool before entering the atomic section */
+ pool_refill(&compress->pool, ALLOW_FAIL);
spin_lock_irqsave(&engine->active.lock, flags);
request = intel_engine_find_active_request(engine);
- if (request) {
- struct i915_gem_context *ctx = request->gem_context;
- struct intel_ring *ring = request->ring;
+ if (!request) {
+ spin_unlock_irqrestore(&engine->active.lock, flags);
+ continue;
+ }
- ee->vm = ctx->vm ?: &ggtt->vm;
+ error->simulated |= record_context(&ee->context, request);
- record_context(&ee->context, ctx);
+ /*
+ * We need to copy these to an anonymous buffer
+ * as the simplest method to avoid being overwritten
+ * by userspace.
+ */
+ capture = capture_vma(capture,
+ request->batch,
+ &ee->batchbuffer);
- /* We need to copy these to an anonymous buffer
- * as the simplest method to avoid being overwritten
- * by userspace.
- */
- ee->batchbuffer =
- i915_error_object_create(i915, request->batch);
+ if (HAS_BROKEN_CS_TLB(i915))
+ capture = capture_vma(capture,
+ engine->gt->scratch,
+ &ee->wa_batchbuffer);
- if (HAS_BROKEN_CS_TLB(i915))
- ee->wa_batchbuffer =
- i915_error_object_create(i915,
- i915->gt.scratch);
- request_record_user_bo(request, ee);
+ capture = request_record_user_bo(request, ee, capture);
- ee->ctx =
- i915_error_object_create(i915,
- request->hw_context->state);
+ capture = capture_vma(capture,
+ request->hw_context->state,
+ &ee->ctx);
- error->simulated |=
- i915_gem_context_no_error_capture(ctx);
+ capture = capture_vma(capture,
+ request->ring->vma,
+ &ee->ringbuffer);
- ee->rq_head = request->head;
- ee->rq_post = request->postfix;
- ee->rq_tail = request->tail;
+ ee->cpu_ring_head = request->ring->head;
+ ee->cpu_ring_tail = request->ring->tail;
- ee->cpu_ring_head = ring->head;
- ee->cpu_ring_tail = ring->tail;
- ee->ringbuffer =
- i915_error_object_create(i915, ring->vma);
+ ee->rq_head = request->head;
+ ee->rq_post = request->postfix;
+ ee->rq_tail = request->tail;
- engine_record_requests(engine, request, ee);
- }
+ engine_record_requests(engine, request, ee);
spin_unlock_irqrestore(&engine->active.lock, flags);
- ee->hws_page =
- i915_error_object_create(i915,
- engine->status_page.vma);
+ error_record_engine_registers(error, engine, ee);
+ error_record_engine_execlists(engine, ee);
- ee->wa_ctx = i915_error_object_create(i915, engine->wa_ctx.vma);
+ while (capture) {
+ struct capture_vma *this = capture;
+ struct i915_vma *vma = *this->slot;
- ee->default_state = capture_object(i915, engine->default_state);
- }
-}
+ *this->slot =
+ i915_error_object_create(i915, vma, compress);
-static void gem_capture_vm(struct i915_gpu_state *error,
- struct i915_address_space *vm,
- int idx)
-{
- struct drm_i915_error_buffer *active_bo;
- struct i915_vma *vma;
- int count;
+ i915_active_ungrab(&vma->active);
+ i915_vma_put(vma);
- count = 0;
- list_for_each_entry(vma, &vm->bound_list, vm_link)
- if (i915_vma_is_active(vma))
- count++;
-
- active_bo = NULL;
- if (count)
- active_bo = kcalloc(count, sizeof(*active_bo), GFP_ATOMIC);
- if (active_bo)
- count = capture_error_bo(active_bo,
- count, &vm->bound_list,
- ACTIVE_ONLY);
- else
- count = 0;
+ capture = this->next;
+ kfree(this);
+ }
- error->active_vm[idx] = vm;
- error->active_bo[idx] = active_bo;
- error->active_bo_count[idx] = count;
-}
+ ee->hws_page =
+ i915_error_object_create(i915,
+ engine->status_page.vma,
+ compress);
-static void capture_active_buffers(struct i915_gpu_state *error)
-{
- int cnt = 0, i, j;
+ ee->wa_ctx =
+ i915_error_object_create(i915,
+ engine->wa_ctx.vma,
+ compress);
- BUILD_BUG_ON(ARRAY_SIZE(error->engine) > ARRAY_SIZE(error->active_bo));
- BUILD_BUG_ON(ARRAY_SIZE(error->active_bo) != ARRAY_SIZE(error->active_vm));
- BUILD_BUG_ON(ARRAY_SIZE(error->active_bo) != ARRAY_SIZE(error->active_bo_count));
+ ee->default_state =
+ capture_object(i915, engine->default_state, compress);
- /* Scan each engine looking for unique active contexts/vm */
- for (i = 0; i < ARRAY_SIZE(error->engine); i++) {
- struct drm_i915_error_engine *ee = &error->engine[i];
- bool found;
+ ee->engine = engine;
- if (!ee->vm)
- continue;
+ ee->next = error->engine;
+ error->engine = ee;
- found = false;
- for (j = 0; j < i && !found; j++)
- found = error->engine[j].vm == ee->vm;
- if (!found)
- gem_capture_vm(error, ee->vm, cnt++);
+ ee = kzalloc(sizeof(*ee), GFP_KERNEL);
+ if (!ee)
+ return;
}
-}
-
-static void capture_pinned_buffers(struct i915_gpu_state *error)
-{
- struct i915_address_space *vm = &error->i915->ggtt.vm;
- struct drm_i915_error_buffer *bo;
- struct i915_vma *vma;
- int count;
-
- count = 0;
- list_for_each_entry(vma, &vm->bound_list, vm_link)
- count++;
-
- bo = NULL;
- if (count)
- bo = kcalloc(count, sizeof(*bo), GFP_ATOMIC);
- if (!bo)
- return;
- error->pinned_bo_count =
- capture_error_bo(bo, count, &vm->bound_list, PINNED_ONLY);
- error->pinned_bo = bo;
+ kfree(ee);
}
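
Note the allocate-ahead loop shape: gem_record_rings keeps one spare drm_i915_error_engine, fills it for the engine at hand, pushes it onto error->engine, then allocates the next spare before looping; the leftover spare is freed on the way out. The same shape reduced to a standalone sketch (names illustrative):

	#include <stdlib.h>

	struct node {
		int engine_id;
		struct node *next;
	};

	static struct node *record_all(int num_engines)
	{
		struct node *head = NULL;
		struct node *n = calloc(1, sizeof(*n));	/* first spare */

		for (int i = 0; i < num_engines && n; i++) {
			n->engine_id = i;	/* fill in captured state */
			n->next = head;		/* push onto result list */
			head = n;
			n = calloc(1, sizeof(*n));	/* spare for next round */
		}
		free(n);	/* leftover spare (or NULL on failure) */
		return head;
	}

	int main(void)
	{
		struct node *head = record_all(4);

		while (head) {
			struct node *n = head;

			head = n->next;
			free(n);
		}
		return 0;
	}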
-static void capture_uc_state(struct i915_gpu_state *error)
+static void
+capture_uc_state(struct i915_gpu_state *error, struct compress *compress)
{
struct drm_i915_private *i915 = error->i915;
struct i915_error_uc *error_uc = &error->uc;
+ struct intel_uc *uc = &i915->gt.uc;
/* Capturing uC state won't be useful if there is no GuC */
- if (!error->device_info.has_guc)
+ if (!error->device_info.has_gt_uc)
return;
- error_uc->guc_fw = i915->guc.fw;
- error_uc->huc_fw = i915->huc.fw;
+ memcpy(&error_uc->guc_fw, &uc->guc.fw, sizeof(uc->guc.fw));
+ memcpy(&error_uc->huc_fw, &uc->huc.fw, sizeof(uc->huc.fw));
/* Non-default firmware paths will be specified by the modparam.
 * As modparams are generally accessible from userspace, make
* explicit copies of the firmware paths.
*/
- error_uc->guc_fw.path = kstrdup(i915->guc.fw.path, GFP_ATOMIC);
- error_uc->huc_fw.path = kstrdup(i915->huc.fw.path, GFP_ATOMIC);
- error_uc->guc_log = i915_error_object_create(i915, i915->guc.log.vma);
+ error_uc->guc_fw.path = kstrdup(uc->guc.fw.path, ALLOW_FAIL);
+ error_uc->huc_fw.path = kstrdup(uc->huc.fw.path, ALLOW_FAIL);
+ error_uc->guc_log = i915_error_object_create(i915,
+ uc->guc.log.vma,
+ compress);
}
/* Capture all registers which don't fit into another category. */
@@ -1603,7 +1514,12 @@ static void capture_reg_state(struct i915_gpu_state *error)
if (IS_GEN(i915, 7))
error->err_int = intel_uncore_read(uncore, GEN7_ERR_INT);
- if (INTEL_GEN(i915) >= 8) {
+ if (INTEL_GEN(i915) >= 12) {
+ error->fault_data0 = intel_uncore_read(uncore,
+ GEN12_FAULT_TLB_DATA0);
+ error->fault_data1 = intel_uncore_read(uncore,
+ GEN12_FAULT_TLB_DATA1);
+ } else if (INTEL_GEN(i915) >= 8) {
error->fault_data0 = intel_uncore_read(uncore,
GEN8_FAULT_TLB_DATA0);
error->fault_data1 = intel_uncore_read(uncore,
@@ -1622,8 +1538,10 @@ static void capture_reg_state(struct i915_gpu_state *error)
if (INTEL_GEN(i915) >= 6) {
error->derrmr = intel_uncore_read(uncore, DERRMR);
- error->error = intel_uncore_read(uncore, ERROR_GEN6);
- error->done_reg = intel_uncore_read(uncore, DONE_REG);
+ if (INTEL_GEN(i915) < 12) {
+ error->error = intel_uncore_read(uncore, ERROR_GEN6);
+ error->done_reg = intel_uncore_read(uncore, DONE_REG);
+ }
}
if (INTEL_GEN(i915) >= 5)
@@ -1679,24 +1597,18 @@ error_msg(struct i915_gpu_state *error,
intel_engine_mask_t engines, const char *msg)
{
int len;
- int i;
-
- for (i = 0; i < ARRAY_SIZE(error->engine); i++)
- if (!error->engine[i].context.pid)
- engines &= ~BIT(i);
len = scnprintf(error->error_msg, sizeof(error->error_msg),
"GPU HANG: ecode %d:%x:0x%08x",
INTEL_GEN(error->i915), engines,
- i915_error_generate_code(error, engines));
- if (engines) {
+ i915_error_generate_code(error));
+ if (error->engine) {
/* Just show the first executing process, more is confusing */
- i = __ffs(engines);
len += scnprintf(error->error_msg + len,
sizeof(error->error_msg) - len,
", in %s [%d]",
- error->engine[i].context.comm,
- error->engine[i].context.pid);
+ error->engine->context.comm,
+ error->engine->context.pid);
}
if (msg)
len += scnprintf(error->error_msg + len,
@@ -1737,12 +1649,10 @@ static void capture_params(struct i915_gpu_state *error)
static unsigned long capture_find_epoch(const struct i915_gpu_state *error)
{
+ const struct drm_i915_error_engine *ee;
unsigned long epoch = error->capture;
- int i;
-
- for (i = 0; i < ARRAY_SIZE(error->engine); i++) {
- const struct drm_i915_error_engine *ee = &error->engine[i];
+ for (ee = error->engine; ee; ee = ee->next) {
if (ee->hangcheck_timestamp &&
time_before(ee->hangcheck_timestamp, epoch))
epoch = ee->hangcheck_timestamp;
@@ -1759,56 +1669,53 @@ static void capture_finish(struct i915_gpu_state *error)
ggtt->vm.clear_range(&ggtt->vm, slot, PAGE_SIZE);
}
-static int capture(void *data)
-{
- struct i915_gpu_state *error = data;
-
- error->time = ktime_get_real();
- error->boottime = ktime_get_boottime();
- error->uptime = ktime_sub(ktime_get(),
- error->i915->gt.last_init_time);
- error->capture = jiffies;
-
- capture_params(error);
- capture_gen_state(error);
- capture_uc_state(error);
- capture_reg_state(error);
- gem_record_fences(error);
- gem_record_rings(error);
- capture_active_buffers(error);
- capture_pinned_buffers(error);
-
- error->overlay = intel_overlay_capture_error_state(error->i915);
- error->display = intel_display_capture_error_state(error->i915);
-
- error->epoch = capture_find_epoch(error);
-
- capture_finish(error);
- return 0;
-}
-
#define DAY_AS_SECONDS(x) (24 * 60 * 60 * (x))
struct i915_gpu_state *
i915_capture_gpu_state(struct drm_i915_private *i915)
{
struct i915_gpu_state *error;
+ struct compress compress;
/* Check if GPU capture has been disabled */
error = READ_ONCE(i915->gpu_error.first_error);
if (IS_ERR(error))
return error;
- error = kzalloc(sizeof(*error), GFP_ATOMIC);
+ error = kzalloc(sizeof(*error), ALLOW_FAIL);
if (!error) {
i915_disable_error_state(i915, -ENOMEM);
return ERR_PTR(-ENOMEM);
}
+ if (!compress_init(&compress)) {
+ kfree(error);
+ i915_disable_error_state(i915, -ENOMEM);
+ return ERR_PTR(-ENOMEM);
+ }
+
kref_init(&error->ref);
error->i915 = i915;
- stop_machine(capture, error, NULL);
+ error->time = ktime_get_real();
+ error->boottime = ktime_get_boottime();
+ error->uptime = ktime_sub(ktime_get(), i915->gt.last_init_time);
+ error->capture = jiffies;
+
+ capture_params(error);
+ capture_gen_state(error);
+ capture_uc_state(error, &compress);
+ capture_reg_state(error);
+ gem_record_fences(error);
+ gem_record_rings(error, &compress);
+
+ error->overlay = intel_overlay_capture_error_state(i915);
+ error->display = intel_display_capture_error_state(i915);
+
+ error->epoch = capture_find_epoch(error);
+
+ capture_finish(error);
+ compress_fini(&compress);
return error;
}
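
The capture() worker and its stop_machine() call are gone: i915_capture_gpu_state now sets the compressor up once, runs every stage inline in process context (only the per-engine request walk still sits under a spinlock), and tears the compressor down at the end. A stripped-down sketch of the new control flow, with stand-in types and helpers:

	#include <stdlib.h>

	struct compressor { void *state; };
	struct gpu_state { int captured; };

	static int compressor_init(struct compressor *c)
	{
		c->state = NULL;	/* stand-in for zlib setup */
		return 0;
	}

	static void compressor_fini(struct compressor *c) { (void)c; }

	static void record_everything(struct gpu_state *e, struct compressor *c)
	{
		(void)c;
		e->captured = 1;	/* stand-in for all capture stages */
	}

	static struct gpu_state *capture_gpu_state(void)
	{
		struct gpu_state *error = calloc(1, sizeof(*error));
		struct compressor compress;

		if (!error)
			return NULL;
		if (compressor_init(&compress)) {
			free(error);
			return NULL;
		}
		record_everything(error, &compress);	/* may sleep freely */
		compressor_fini(&compress);
		return error;
	}

	int main(void)
	{
		free(capture_gpu_state());
		return 0;
	}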
@@ -1858,15 +1765,14 @@ void i915_capture_error_state(struct drm_i915_private *i915,
return;
}
- if (!warned &&
+ if (!xchg(&warned, true) &&
ktime_get_real_seconds() - DRIVER_TIMESTAMP < DAY_AS_SECONDS(180)) {
- DRM_INFO("GPU hangs can indicate a bug anywhere in the entire gfx stack, including userspace.\n");
- DRM_INFO("Please file a _new_ bug report on bugs.freedesktop.org against DRI -> DRM/Intel\n");
- DRM_INFO("drm/i915 developers can then reassign to the right component if it's not a kernel issue.\n");
- DRM_INFO("The gpu crash dump is required to analyze gpu hangs, so please always attach it.\n");
- DRM_INFO("GPU crash dump saved to /sys/class/drm/card%d/error\n",
- i915->drm.primary->index);
- warned = true;
+ pr_info("GPU hangs can indicate a bug anywhere in the entire gfx stack, including userspace.\n");
+ pr_info("Please file a _new_ bug report on bugs.freedesktop.org against DRI -> DRM/Intel\n");
+ pr_info("drm/i915 developers can then reassign to the right component if it's not a kernel issue.\n");
+ pr_info("The GPU crash dump is required to analyze GPU hangs, so please always attach it.\n");
+ pr_info("GPU crash dump saved to /sys/class/drm/card%d/error\n",
+ i915->drm.primary->index);
}
}
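
The once-only hint also moves from a racy check-then-set on warned to xchg(&warned, true), so exactly one caller prints the banner even if several CPUs report a hang at the same moment. The same idiom in standalone C11 (the kernel uses its own xchg()):

	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdio.h>

	static atomic_bool warned;

	static void warn_once(void)
	{
		/* Only the caller that flips false->true prints. */
		if (!atomic_exchange(&warned, true))
			printf("GPU crash dump saved\n");
	}

	int main(void)
	{
		warn_once();
		warn_once();	/* silent: already warned */
		return 0;
	}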
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.h b/drivers/gpu/drm/i915/i915_gpu_error.h
index 2ecd0c6a1c94..df9f57766626 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.h
+++ b/drivers/gpu/drm/i915/i915_gpu_error.h
@@ -7,6 +7,7 @@
#ifndef _I915_GPU_ERROR_H_
#define _I915_GPU_ERROR_H_
+#include <linux/atomic.h>
#include <linux/kref.h>
#include <linux/ktime.h>
#include <linux/sched.h>
@@ -14,9 +15,9 @@
#include <drm/drm_mm.h>
#include "gt/intel_engine.h"
+#include "gt/uc/intel_uc_fw.h"
#include "intel_device_info.h"
-#include "intel_uc_fw.h"
#include "i915_gem.h"
#include "i915_gem_gtt.h"
@@ -80,11 +81,11 @@ struct i915_gpu_state {
struct intel_display_error_state *display;
struct drm_i915_error_engine {
- int engine_id;
+ const struct intel_engine_cs *engine;
+
/* Software tracked state */
bool idle;
unsigned long hangcheck_timestamp;
- struct i915_address_space *vm;
int num_requests;
u32 reset_count;
@@ -158,34 +159,14 @@ struct i915_gpu_state {
u32 pp_dir_base;
};
} vm_info;
- } engine[I915_NUM_ENGINES];
-
- struct drm_i915_error_buffer {
- u32 size;
- u32 name;
- u64 gtt_offset;
- u32 read_domains;
- u32 write_domain;
- s32 fence_reg:I915_MAX_NUM_FENCE_BITS;
- u32 tiling:2;
- u32 dirty:1;
- u32 purgeable:1;
- u32 userptr:1;
- u32 cache_level:3;
- } *active_bo[I915_NUM_ENGINES], *pinned_bo;
- u32 active_bo_count[I915_NUM_ENGINES], pinned_bo_count;
- struct i915_address_space *active_vm[I915_NUM_ENGINES];
+
+ struct drm_i915_error_engine *next;
+ } *engine;
struct scatterlist *sgl, *fit;
};
struct i915_gpu_error {
- /* For hangcheck timer */
-#define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */
-#define DRM_I915_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)
-
- struct delayed_work hangcheck_work;
-
/* For reset and error_state handling. */
spinlock_t lock;
/* Protected by the above dev->gpu_error.lock. */
@@ -193,52 +174,11 @@ struct i915_gpu_error {
atomic_t pending_fb_pin;
- /**
- * flags: Control various stages of the GPU reset
- *
- * #I915_RESET_BACKOFF - When we start a global reset, we need to
- * serialise with any other users attempting to do the same, and
- * any global resources that may be clobber by the reset (such as
- * FENCE registers).
- *
- * #I915_RESET_ENGINE[num_engines] - Since the driver doesn't need to
- * acquire the struct_mutex to reset an engine, we need an explicit
- * flag to prevent two concurrent reset attempts in the same engine.
- * As the number of engines continues to grow, allocate the flags from
- * the most significant bits.
- *
- * #I915_WEDGED - If reset fails and we can no longer use the GPU,
- * we set the #I915_WEDGED bit. Prior to command submission, e.g.
- * i915_request_alloc(), this bit is checked and the sequence
- * aborted (with -EIO reported to userspace) if set.
- */
- unsigned long flags;
-#define I915_RESET_BACKOFF 0
-#define I915_RESET_MODESET 1
-#define I915_RESET_ENGINE 2
-#define I915_WEDGED (BITS_PER_LONG - 1)
-
/** Number of times the device has been reset (global) */
- u32 reset_count;
+ atomic_t reset_count;
/** Number of times an engine has been reset */
- u32 reset_engine_count[I915_NUM_ENGINES];
-
- struct mutex wedge_mutex; /* serialises wedging/unwedging */
-
- /**
- * Waitqueue to signal when a hang is detected. Used to for waiters
- * to release the struct_mutex for the reset to procede.
- */
- wait_queue_head_t wait_queue;
-
- /**
- * Waitqueue to signal when the reset has completed. Used by clients
- * that wait for dev_priv->mm.wedged to settle.
- */
- wait_queue_head_t reset_queue;
-
- struct srcu_struct reset_backoff_srcu;
+ atomic_t reset_engine_count[I915_NUM_ENGINES];
};
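
In the header, the global and per-engine reset counters become atomic_t, so the reset path and its readers can update them without the heavyweight serialization that the deleted flags and waitqueues used to provide. The access pattern in standalone C11 atomics (the structure below is illustrative, not the i915 layout):

	#include <stdatomic.h>
	#include <stdio.h>

	#define NUM_ENGINES 8

	struct gpu_error_stats {
		atomic_uint reset_count;			/* global resets */
		atomic_uint reset_engine_count[NUM_ENGINES];	/* per engine */
	};

	static void note_engine_reset(struct gpu_error_stats *s, int engine)
	{
		atomic_fetch_add(&s->reset_engine_count[engine], 1);
	}

	int main(void)
	{
		static struct gpu_error_stats s;	/* zero-initialized */

		note_engine_reset(&s, 0);
		printf("engine0 resets: %u\n",
		       atomic_load(&s.reset_engine_count[0]));
		return 0;
	}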
struct drm_i915_error_state_buf {
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index b2e27b5b0df9..37e3dd3c1a9d 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -37,15 +37,19 @@
#include <drm/drm_irq.h>
#include <drm/i915_drm.h>
+#include "display/intel_display_types.h"
#include "display/intel_fifo_underrun.h"
#include "display/intel_hotplug.h"
#include "display/intel_lpe_audio.h"
#include "display/intel_psr.h"
+#include "gt/intel_gt.h"
+#include "gt/intel_gt_irq.h"
+#include "gt/intel_gt_pm_irq.h"
+
#include "i915_drv.h"
#include "i915_irq.h"
#include "i915_trace.h"
-#include "intel_drv.h"
#include "intel_pm.h"
/**
@@ -56,6 +60,8 @@
* and related files, but that will be described in separate chapters.
*/
+typedef bool (*long_pulse_detect_func)(enum hpd_pin pin, u32 val);
+
static const u32 hpd_ilk[HPD_NUM_PINS] = {
[HPD_PORT_A] = DE_DP_A_HOTPLUG,
};
@@ -133,6 +139,15 @@ static const u32 hpd_gen11[HPD_NUM_PINS] = {
[HPD_PORT_F] = GEN11_TC4_HOTPLUG | GEN11_TBT4_HOTPLUG
};
+static const u32 hpd_gen12[HPD_NUM_PINS] = {
+ [HPD_PORT_D] = GEN11_TC1_HOTPLUG | GEN11_TBT1_HOTPLUG,
+ [HPD_PORT_E] = GEN11_TC2_HOTPLUG | GEN11_TBT2_HOTPLUG,
+ [HPD_PORT_F] = GEN11_TC3_HOTPLUG | GEN11_TBT3_HOTPLUG,
+ [HPD_PORT_G] = GEN11_TC4_HOTPLUG | GEN11_TBT4_HOTPLUG,
+ [HPD_PORT_H] = GEN12_TC5_HOTPLUG | GEN12_TBT5_HOTPLUG,
+ [HPD_PORT_I] = GEN12_TC6_HOTPLUG | GEN12_TBT6_HOTPLUG
+};
+
static const u32 hpd_icp[HPD_NUM_PINS] = {
[HPD_PORT_A] = SDE_DDIA_HOTPLUG_ICP,
[HPD_PORT_B] = SDE_DDIB_HOTPLUG_ICP,
@@ -148,8 +163,20 @@ static const u32 hpd_mcc[HPD_NUM_PINS] = {
[HPD_PORT_C] = SDE_TC1_HOTPLUG_ICP
};
-static void gen3_irq_reset(struct intel_uncore *uncore, i915_reg_t imr,
- i915_reg_t iir, i915_reg_t ier)
+static const u32 hpd_tgp[HPD_NUM_PINS] = {
+ [HPD_PORT_A] = SDE_DDIA_HOTPLUG_ICP,
+ [HPD_PORT_B] = SDE_DDIB_HOTPLUG_ICP,
+ [HPD_PORT_C] = SDE_DDIC_HOTPLUG_TGP,
+ [HPD_PORT_D] = SDE_TC1_HOTPLUG_ICP,
+ [HPD_PORT_E] = SDE_TC2_HOTPLUG_ICP,
+ [HPD_PORT_F] = SDE_TC3_HOTPLUG_ICP,
+ [HPD_PORT_G] = SDE_TC4_HOTPLUG_ICP,
+ [HPD_PORT_H] = SDE_TC5_HOTPLUG_TGP,
+ [HPD_PORT_I] = SDE_TC6_HOTPLUG_TGP,
+};
+
+void gen3_irq_reset(struct intel_uncore *uncore, i915_reg_t imr,
+ i915_reg_t iir, i915_reg_t ier)
{
intel_uncore_write(uncore, imr, 0xffffffff);
intel_uncore_posting_read(uncore, imr);
@@ -163,7 +190,7 @@ static void gen3_irq_reset(struct intel_uncore *uncore, i915_reg_t imr,
intel_uncore_posting_read(uncore, iir);
}
-static void gen2_irq_reset(struct intel_uncore *uncore)
+void gen2_irq_reset(struct intel_uncore *uncore)
{
intel_uncore_write16(uncore, GEN2_IMR, 0xffff);
intel_uncore_posting_read16(uncore, GEN2_IMR);
@@ -177,19 +204,6 @@ static void gen2_irq_reset(struct intel_uncore *uncore)
intel_uncore_posting_read16(uncore, GEN2_IIR);
}
-#define GEN8_IRQ_RESET_NDX(uncore, type, which) \
-({ \
- unsigned int which_ = which; \
- gen3_irq_reset((uncore), GEN8_##type##_IMR(which_), \
- GEN8_##type##_IIR(which_), GEN8_##type##_IER(which_)); \
-})
-
-#define GEN3_IRQ_RESET(uncore, type) \
- gen3_irq_reset((uncore), type##IMR, type##IIR, type##IER)
-
-#define GEN2_IRQ_RESET(uncore) \
- gen2_irq_reset(uncore)
-
/*
* We should clear IMR at preinstall/uninstall, and just check at postinstall.
*/
@@ -223,10 +237,10 @@ static void gen2_assert_iir_is_zero(struct intel_uncore *uncore)
intel_uncore_posting_read16(uncore, GEN2_IIR);
}
-static void gen3_irq_init(struct intel_uncore *uncore,
- i915_reg_t imr, u32 imr_val,
- i915_reg_t ier, u32 ier_val,
- i915_reg_t iir)
+void gen3_irq_init(struct intel_uncore *uncore,
+ i915_reg_t imr, u32 imr_val,
+ i915_reg_t ier, u32 ier_val,
+ i915_reg_t iir)
{
gen3_assert_iir_is_zero(uncore, iir);
@@ -235,8 +249,8 @@ static void gen3_irq_init(struct intel_uncore *uncore,
intel_uncore_posting_read(uncore, imr);
}
-static void gen2_irq_init(struct intel_uncore *uncore,
- u32 imr_val, u32 ier_val)
+void gen2_irq_init(struct intel_uncore *uncore,
+ u32 imr_val, u32 ier_val)
{
gen2_assert_iir_is_zero(uncore);
@@ -245,27 +259,6 @@ static void gen2_irq_init(struct intel_uncore *uncore,
intel_uncore_posting_read16(uncore, GEN2_IMR);
}
-#define GEN8_IRQ_INIT_NDX(uncore, type, which, imr_val, ier_val) \
-({ \
- unsigned int which_ = which; \
- gen3_irq_init((uncore), \
- GEN8_##type##_IMR(which_), imr_val, \
- GEN8_##type##_IER(which_), ier_val, \
- GEN8_##type##_IIR(which_)); \
-})
-
-#define GEN3_IRQ_INIT(uncore, type, imr_val, ier_val) \
- gen3_irq_init((uncore), \
- type##IMR, imr_val, \
- type##IER, ier_val, \
- type##IIR)
-
-#define GEN2_IRQ_INIT(uncore, imr_val, ier_val) \
- gen2_irq_init((uncore), imr_val, ier_val)
-
-static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);
-static void gen9_guc_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);
-
/* For display hotplug interrupt */
static inline void
i915_hotplug_interrupt_update_locked(struct drm_i915_private *dev_priv,
@@ -304,41 +297,6 @@ void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv,
spin_unlock_irq(&dev_priv->irq_lock);
}
-static u32
-gen11_gt_engine_identity(struct drm_i915_private * const i915,
- const unsigned int bank, const unsigned int bit);
-
-static bool gen11_reset_one_iir(struct drm_i915_private * const i915,
- const unsigned int bank,
- const unsigned int bit)
-{
- void __iomem * const regs = i915->uncore.regs;
- u32 dw;
-
- lockdep_assert_held(&i915->irq_lock);
-
- dw = raw_reg_read(regs, GEN11_GT_INTR_DW(bank));
- if (dw & BIT(bit)) {
- /*
- * According to the BSpec, DW_IIR bits cannot be cleared without
- * first servicing the Selector & Shared IIR registers.
- */
- gen11_gt_engine_identity(i915, bank, bit);
-
- /*
- * We locked GT INT DW by reading it. If we want to (try
- * to) recover from this successfully, we need to clear
- * our bit, otherwise we are locking the register for
- * everybody.
- */
- raw_reg_write(regs, GEN11_GT_INTR_DW(bank), BIT(bit));
-
- return true;
- }
-
- return false;
-}
-
/**
* ilk_update_display_irq - update DEIMR
* @dev_priv: driver private
@@ -369,39 +327,6 @@ void ilk_update_display_irq(struct drm_i915_private *dev_priv,
}
}
-/**
- * ilk_update_gt_irq - update GTIMR
- * @dev_priv: driver private
- * @interrupt_mask: mask of interrupt bits to update
- * @enabled_irq_mask: mask of interrupt bits to enable
- */
-static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
- u32 interrupt_mask,
- u32 enabled_irq_mask)
-{
- lockdep_assert_held(&dev_priv->irq_lock);
-
- WARN_ON(enabled_irq_mask & ~interrupt_mask);
-
- if (WARN_ON(!intel_irqs_enabled(dev_priv)))
- return;
-
- dev_priv->gt_irq_mask &= ~interrupt_mask;
- dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
- I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
-}
-
-void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, u32 mask)
-{
- ilk_update_gt_irq(dev_priv, mask, mask);
- intel_uncore_posting_read_fw(&dev_priv->uncore, GTIMR);
-}
-
-void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, u32 mask)
-{
- ilk_update_gt_irq(dev_priv, mask, 0);
-}
-
static i915_reg_t gen6_pm_iir(struct drm_i915_private *dev_priv)
{
WARN_ON_ONCE(INTEL_GEN(dev_priv) >= 11);
@@ -409,178 +334,74 @@ static i915_reg_t gen6_pm_iir(struct drm_i915_private *dev_priv)
return INTEL_GEN(dev_priv) >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR;
}
-static void write_pm_imr(struct drm_i915_private *dev_priv)
-{
- i915_reg_t reg;
- u32 mask = dev_priv->pm_imr;
-
- if (INTEL_GEN(dev_priv) >= 11) {
- reg = GEN11_GPM_WGBOXPERF_INTR_MASK;
- /* pm is in upper half */
- mask = mask << 16;
- } else if (INTEL_GEN(dev_priv) >= 8) {
- reg = GEN8_GT_IMR(2);
- } else {
- reg = GEN6_PMIMR;
- }
-
- I915_WRITE(reg, mask);
- POSTING_READ(reg);
-}
-
-static void write_pm_ier(struct drm_i915_private *dev_priv)
-{
- i915_reg_t reg;
- u32 mask = dev_priv->pm_ier;
-
- if (INTEL_GEN(dev_priv) >= 11) {
- reg = GEN11_GPM_WGBOXPERF_INTR_ENABLE;
- /* pm is in upper half */
- mask = mask << 16;
- } else if (INTEL_GEN(dev_priv) >= 8) {
- reg = GEN8_GT_IER(2);
- } else {
- reg = GEN6_PMIER;
- }
-
- I915_WRITE(reg, mask);
-}
-
-/**
- * snb_update_pm_irq - update GEN6_PMIMR
- * @dev_priv: driver private
- * @interrupt_mask: mask of interrupt bits to update
- * @enabled_irq_mask: mask of interrupt bits to enable
- */
-static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
- u32 interrupt_mask,
- u32 enabled_irq_mask)
-{
- u32 new_val;
-
- WARN_ON(enabled_irq_mask & ~interrupt_mask);
-
- lockdep_assert_held(&dev_priv->irq_lock);
-
- new_val = dev_priv->pm_imr;
- new_val &= ~interrupt_mask;
- new_val |= (~enabled_irq_mask & interrupt_mask);
-
- if (new_val != dev_priv->pm_imr) {
- dev_priv->pm_imr = new_val;
- write_pm_imr(dev_priv);
- }
-}
-
-void gen6_unmask_pm_irq(struct drm_i915_private *dev_priv, u32 mask)
-{
- if (WARN_ON(!intel_irqs_enabled(dev_priv)))
- return;
-
- snb_update_pm_irq(dev_priv, mask, mask);
-}
-
-static void __gen6_mask_pm_irq(struct drm_i915_private *dev_priv, u32 mask)
-{
- snb_update_pm_irq(dev_priv, mask, 0);
-}
-
-void gen6_mask_pm_irq(struct drm_i915_private *dev_priv, u32 mask)
-{
- if (WARN_ON(!intel_irqs_enabled(dev_priv)))
- return;
-
- __gen6_mask_pm_irq(dev_priv, mask);
-}
-
-static void gen6_reset_pm_iir(struct drm_i915_private *dev_priv, u32 reset_mask)
-{
- i915_reg_t reg = gen6_pm_iir(dev_priv);
-
- lockdep_assert_held(&dev_priv->irq_lock);
-
- I915_WRITE(reg, reset_mask);
- I915_WRITE(reg, reset_mask);
- POSTING_READ(reg);
-}
-
-static void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, u32 enable_mask)
-{
- lockdep_assert_held(&dev_priv->irq_lock);
-
- dev_priv->pm_ier |= enable_mask;
- write_pm_ier(dev_priv);
- gen6_unmask_pm_irq(dev_priv, enable_mask);
- /* unmask_pm_irq provides an implicit barrier (POSTING_READ) */
-}
-
-static void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, u32 disable_mask)
-{
- lockdep_assert_held(&dev_priv->irq_lock);
-
- dev_priv->pm_ier &= ~disable_mask;
- __gen6_mask_pm_irq(dev_priv, disable_mask);
- write_pm_ier(dev_priv);
- /* though a barrier is missing here, we don't really need one */
-}
-
void gen11_reset_rps_interrupts(struct drm_i915_private *dev_priv)
{
- spin_lock_irq(&dev_priv->irq_lock);
+ struct intel_gt *gt = &dev_priv->gt;
- while (gen11_reset_one_iir(dev_priv, 0, GEN11_GTPM))
+ spin_lock_irq(&gt->irq_lock);
+
+ while (gen11_gt_reset_one_iir(gt, 0, GEN11_GTPM))
;
dev_priv->gt_pm.rps.pm_iir = 0;
- spin_unlock_irq(&dev_priv->irq_lock);
+ spin_unlock_irq(&gt->irq_lock);
}
void gen6_reset_rps_interrupts(struct drm_i915_private *dev_priv)
{
- spin_lock_irq(&dev_priv->irq_lock);
- gen6_reset_pm_iir(dev_priv, GEN6_PM_RPS_EVENTS);
+ struct intel_gt *gt = &dev_priv->gt;
+
+ spin_lock_irq(&gt->irq_lock);
+ gen6_gt_pm_reset_iir(gt, GEN6_PM_RPS_EVENTS);
dev_priv->gt_pm.rps.pm_iir = 0;
- spin_unlock_irq(&dev_priv->irq_lock);
+ spin_unlock_irq(&gt->irq_lock);
}
void gen6_enable_rps_interrupts(struct drm_i915_private *dev_priv)
{
+ struct intel_gt *gt = &dev_priv->gt;
struct intel_rps *rps = &dev_priv->gt_pm.rps;
if (READ_ONCE(rps->interrupts_enabled))
return;
- spin_lock_irq(&dev_priv->irq_lock);
+ spin_lock_irq(&gt->irq_lock);
WARN_ON_ONCE(rps->pm_iir);
if (INTEL_GEN(dev_priv) >= 11)
- WARN_ON_ONCE(gen11_reset_one_iir(dev_priv, 0, GEN11_GTPM));
+ WARN_ON_ONCE(gen11_gt_reset_one_iir(gt, 0, GEN11_GTPM));
else
WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events);
rps->interrupts_enabled = true;
- gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
+ gen6_gt_pm_enable_irq(gt, dev_priv->pm_rps_events);
- spin_unlock_irq(&dev_priv->irq_lock);
+ spin_unlock_irq(&gt->irq_lock);
+}
+
+u32 gen6_sanitize_rps_pm_mask(const struct drm_i915_private *i915, u32 mask)
+{
+ return mask & ~i915->gt_pm.rps.pm_intrmsk_mbz;
}
void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv)
{
struct intel_rps *rps = &dev_priv->gt_pm.rps;
+ struct intel_gt *gt = &dev_priv->gt;
if (!READ_ONCE(rps->interrupts_enabled))
return;
- spin_lock_irq(&dev_priv->irq_lock);
+ spin_lock_irq(&gt->irq_lock);
rps->interrupts_enabled = false;
I915_WRITE(GEN6_PMINTRMSK, gen6_sanitize_rps_pm_mask(dev_priv, ~0u));
- gen6_disable_pm_irq(dev_priv, GEN6_PM_RPS_EVENTS);
+ gen6_gt_pm_disable_irq(gt, GEN6_PM_RPS_EVENTS);
- spin_unlock_irq(&dev_priv->irq_lock);
- synchronize_irq(dev_priv->drm.irq);
+ spin_unlock_irq(&gt->irq_lock);
+ intel_synchronize_irq(dev_priv);
/* Now that we will not be generating any more work, flush any
* outstanding tasks. As we are called on the RPS idle path,
@@ -594,78 +415,90 @@ void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv)
gen6_reset_rps_interrupts(dev_priv);
}
-void gen9_reset_guc_interrupts(struct drm_i915_private *dev_priv)
+void gen9_reset_guc_interrupts(struct intel_guc *guc)
{
- assert_rpm_wakelock_held(&dev_priv->runtime_pm);
+ struct intel_gt *gt = guc_to_gt(guc);
- spin_lock_irq(&dev_priv->irq_lock);
- gen6_reset_pm_iir(dev_priv, dev_priv->pm_guc_events);
- spin_unlock_irq(&dev_priv->irq_lock);
+ assert_rpm_wakelock_held(&gt->i915->runtime_pm);
+
+ spin_lock_irq(&gt->irq_lock);
+ gen6_gt_pm_reset_iir(gt, gt->pm_guc_events);
+ spin_unlock_irq(&gt->irq_lock);
}
-void gen9_enable_guc_interrupts(struct drm_i915_private *dev_priv)
+void gen9_enable_guc_interrupts(struct intel_guc *guc)
{
- assert_rpm_wakelock_held(&dev_priv->runtime_pm);
+ struct intel_gt *gt = guc_to_gt(guc);
- spin_lock_irq(&dev_priv->irq_lock);
- if (!dev_priv->guc.interrupts.enabled) {
- WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) &
- dev_priv->pm_guc_events);
- dev_priv->guc.interrupts.enabled = true;
- gen6_enable_pm_irq(dev_priv, dev_priv->pm_guc_events);
+ assert_rpm_wakelock_held(&gt->i915->runtime_pm);
+
+ spin_lock_irq(&gt->irq_lock);
+ if (!guc->interrupts.enabled) {
+ WARN_ON_ONCE(intel_uncore_read(gt->uncore,
+ gen6_pm_iir(gt->i915)) &
+ gt->pm_guc_events);
+ guc->interrupts.enabled = true;
+ gen6_gt_pm_enable_irq(gt, gt->pm_guc_events);
}
- spin_unlock_irq(&dev_priv->irq_lock);
+ spin_unlock_irq(&gt->irq_lock);
}
-void gen9_disable_guc_interrupts(struct drm_i915_private *dev_priv)
+void gen9_disable_guc_interrupts(struct intel_guc *guc)
{
- assert_rpm_wakelock_held(&dev_priv->runtime_pm);
+ struct intel_gt *gt = guc_to_gt(guc);
- spin_lock_irq(&dev_priv->irq_lock);
- dev_priv->guc.interrupts.enabled = false;
+ assert_rpm_wakelock_held(&gt->i915->runtime_pm);
- gen6_disable_pm_irq(dev_priv, dev_priv->pm_guc_events);
+ spin_lock_irq(&gt->irq_lock);
+ guc->interrupts.enabled = false;
- spin_unlock_irq(&dev_priv->irq_lock);
- synchronize_irq(dev_priv->drm.irq);
+ gen6_gt_pm_disable_irq(gt, gt->pm_guc_events);
- gen9_reset_guc_interrupts(dev_priv);
+ spin_unlock_irq(&gt->irq_lock);
+ intel_synchronize_irq(gt->i915);
+
+ gen9_reset_guc_interrupts(guc);
}
-void gen11_reset_guc_interrupts(struct drm_i915_private *i915)
+void gen11_reset_guc_interrupts(struct intel_guc *guc)
{
- spin_lock_irq(&i915->irq_lock);
- gen11_reset_one_iir(i915, 0, GEN11_GUC);
- spin_unlock_irq(&i915->irq_lock);
+ struct intel_gt *gt = guc_to_gt(guc);
+
+ spin_lock_irq(&gt->irq_lock);
+ gen11_gt_reset_one_iir(gt, 0, GEN11_GUC);
+ spin_unlock_irq(&gt->irq_lock);
}
-void gen11_enable_guc_interrupts(struct drm_i915_private *dev_priv)
+void gen11_enable_guc_interrupts(struct intel_guc *guc)
{
- spin_lock_irq(&dev_priv->irq_lock);
- if (!dev_priv->guc.interrupts.enabled) {
- u32 events = REG_FIELD_PREP(ENGINE1_MASK,
- GEN11_GUC_INTR_GUC2HOST);
-
- WARN_ON_ONCE(gen11_reset_one_iir(dev_priv, 0, GEN11_GUC));
- I915_WRITE(GEN11_GUC_SG_INTR_ENABLE, events);
- I915_WRITE(GEN11_GUC_SG_INTR_MASK, ~events);
- dev_priv->guc.interrupts.enabled = true;
+ struct intel_gt *gt = guc_to_gt(guc);
+
+ spin_lock_irq(&gt->irq_lock);
+ if (!guc->interrupts.enabled) {
+ u32 events = REG_FIELD_PREP(ENGINE1_MASK, GUC_INTR_GUC2HOST);
+
+ WARN_ON_ONCE(gen11_gt_reset_one_iir(gt, 0, GEN11_GUC));
+ intel_uncore_write(gt->uncore, GEN11_GUC_SG_INTR_ENABLE, events);
+ intel_uncore_write(gt->uncore, GEN11_GUC_SG_INTR_MASK, ~events);
+ guc->interrupts.enabled = true;
}
- spin_unlock_irq(&dev_priv->irq_lock);
+ spin_unlock_irq(&gt->irq_lock);
}
-void gen11_disable_guc_interrupts(struct drm_i915_private *dev_priv)
+void gen11_disable_guc_interrupts(struct intel_guc *guc)
{
- spin_lock_irq(&dev_priv->irq_lock);
- dev_priv->guc.interrupts.enabled = false;
+ struct intel_gt *gt = guc_to_gt(guc);
- I915_WRITE(GEN11_GUC_SG_INTR_MASK, ~0);
- I915_WRITE(GEN11_GUC_SG_INTR_ENABLE, 0);
+ spin_lock_irq(&gt->irq_lock);
+ guc->interrupts.enabled = false;
- spin_unlock_irq(&dev_priv->irq_lock);
- synchronize_irq(dev_priv->drm.irq);
+ intel_uncore_write(gt->uncore, GEN11_GUC_SG_INTR_MASK, ~0);
+ intel_uncore_write(gt->uncore, GEN11_GUC_SG_INTR_ENABLE, 0);
+
+ spin_unlock_irq(&gt->irq_lock);
+ intel_synchronize_irq(gt->i915);
- gen11_reset_guc_interrupts(dev_priv);
+ gen11_reset_guc_interrupts(guc);
}
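
All of the GuC interrupt helpers now take a struct intel_guc and derive the GT with guc_to_gt(), so their locking consistently uses gt->irq_lock rather than the device-wide irq_lock. guc_to_gt() is presumably the usual container_of() walk from the embedded GuC back to its owner; a minimal sketch of that derivation (the real i915 structure layout may differ):

	#include <stddef.h>

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct guc { int interrupts_enabled; };
	struct uc { struct guc guc; };
	struct gt { int irq_lock; struct uc uc; };

	static struct gt *guc_to_gt(struct guc *guc)
	{
		return container_of(guc, struct gt, uc.guc);
	}

	int main(void)
	{
		struct gt g = { 0 };

		return guc_to_gt(&g.uc.guc) == &g ? 0 : 1;
	}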
/**
@@ -924,11 +757,12 @@ static void i915_enable_asle_pipestat(struct drm_i915_private *dev_priv)
/* Called from drm generic code, passed a 'crtc', which
* we use as a pipe index
*/
-static u32 i915_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
+u32 i915_get_vblank_counter(struct drm_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
+ struct drm_i915_private *dev_priv = to_i915(crtc->dev);
+ struct drm_vblank_crtc *vblank = &dev_priv->drm.vblank[drm_crtc_index(crtc)];
const struct drm_display_mode *mode = &vblank->hwmode;
+ enum pipe pipe = to_intel_crtc(crtc)->pipe;
i915_reg_t high_frame, low_frame;
u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;
unsigned long irqflags;
@@ -989,9 +823,10 @@ static u32 i915_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
}
-static u32 g4x_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
+u32 g4x_get_vblank_counter(struct drm_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc->dev);
+ enum pipe pipe = to_intel_crtc(crtc)->pipe;
return I915_READ(PIPE_FRMCOUNT_G4X(pipe));
}
@@ -1107,10 +942,10 @@ static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
return (position + crtc->scanline_offset) % vtotal;
}
-static bool i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
- bool in_vblank_irq, int *vpos, int *hpos,
- ktime_t *stime, ktime_t *etime,
- const struct drm_display_mode *mode)
+bool i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
+ bool in_vblank_irq, int *vpos, int *hpos,
+ ktime_t *stime, ktime_t *etime,
+ const struct drm_display_mode *mode)
{
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = intel_get_crtc_for_pipe(dev_priv,
@@ -1335,17 +1170,18 @@ static void gen6_pm_rps_work(struct work_struct *work)
{
struct drm_i915_private *dev_priv =
container_of(work, struct drm_i915_private, gt_pm.rps.work);
+ struct intel_gt *gt = &dev_priv->gt;
struct intel_rps *rps = &dev_priv->gt_pm.rps;
bool client_boost = false;
int new_delay, adj, min, max;
u32 pm_iir = 0;
- spin_lock_irq(&dev_priv->irq_lock);
+ spin_lock_irq(&gt->irq_lock);
if (rps->interrupts_enabled) {
pm_iir = fetch_and_zero(&rps->pm_iir);
client_boost = atomic_read(&rps->num_waiters);
}
- spin_unlock_irq(&dev_priv->irq_lock);
+ spin_unlock_irq(&gt->irq_lock);
/* Make sure we didn't queue anything we're not going to process. */
WARN_ON(pm_iir & ~dev_priv->pm_rps_events);
@@ -1422,10 +1258,10 @@ static void gen6_pm_rps_work(struct work_struct *work)
out:
/* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */
- spin_lock_irq(&dev_priv->irq_lock);
+ spin_lock_irq(&gt->irq_lock);
if (rps->interrupts_enabled)
- gen6_unmask_pm_irq(dev_priv, dev_priv->pm_rps_events);
- spin_unlock_irq(&dev_priv->irq_lock);
+ gen6_gt_pm_unmask_irq(gt, dev_priv->pm_rps_events);
+ spin_unlock_irq(&gt->irq_lock);
}
@@ -1442,6 +1278,7 @@ static void ivybridge_parity_work(struct work_struct *work)
{
struct drm_i915_private *dev_priv =
container_of(work, typeof(*dev_priv), l3_parity.error_work);
+ struct intel_gt *gt = &dev_priv->gt;
u32 error_status, row, bank, subbank;
char *parity_event[6];
u32 misccpctl;
@@ -1503,144 +1340,13 @@ static void ivybridge_parity_work(struct work_struct *work)
out:
WARN_ON(dev_priv->l3_parity.which_slice);
- spin_lock_irq(&dev_priv->irq_lock);
- gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv));
- spin_unlock_irq(&dev_priv->irq_lock);
+ spin_lock_irq(&gt->irq_lock);
+ gen5_gt_enable_irq(gt, GT_PARITY_ERROR(dev_priv));
+ spin_unlock_irq(&gt->irq_lock);
mutex_unlock(&dev_priv->drm.struct_mutex);
}
-static void ivybridge_parity_error_irq_handler(struct drm_i915_private *dev_priv,
- u32 iir)
-{
- if (!HAS_L3_DPF(dev_priv))
- return;
-
- spin_lock(&dev_priv->irq_lock);
- gen5_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv));
- spin_unlock(&dev_priv->irq_lock);
-
- iir &= GT_PARITY_ERROR(dev_priv);
- if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1)
- dev_priv->l3_parity.which_slice |= 1 << 1;
-
- if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
- dev_priv->l3_parity.which_slice |= 1 << 0;
-
- queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
-}
-
-static void ilk_gt_irq_handler(struct drm_i915_private *dev_priv,
- u32 gt_iir)
-{
- if (gt_iir & GT_RENDER_USER_INTERRUPT)
- intel_engine_breadcrumbs_irq(dev_priv->engine[RCS0]);
- if (gt_iir & ILK_BSD_USER_INTERRUPT)
- intel_engine_breadcrumbs_irq(dev_priv->engine[VCS0]);
-}
-
-static void snb_gt_irq_handler(struct drm_i915_private *dev_priv,
- u32 gt_iir)
-{
- if (gt_iir & GT_RENDER_USER_INTERRUPT)
- intel_engine_breadcrumbs_irq(dev_priv->engine[RCS0]);
- if (gt_iir & GT_BSD_USER_INTERRUPT)
- intel_engine_breadcrumbs_irq(dev_priv->engine[VCS0]);
- if (gt_iir & GT_BLT_USER_INTERRUPT)
- intel_engine_breadcrumbs_irq(dev_priv->engine[BCS0]);
-
- if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
- GT_BSD_CS_ERROR_INTERRUPT |
- GT_RENDER_CS_MASTER_ERROR_INTERRUPT))
- DRM_DEBUG("Command parser error, gt_iir 0x%08x\n", gt_iir);
-
- if (gt_iir & GT_PARITY_ERROR(dev_priv))
- ivybridge_parity_error_irq_handler(dev_priv, gt_iir);
-}
-
-static void
-gen8_cs_irq_handler(struct intel_engine_cs *engine, u32 iir)
-{
- bool tasklet = false;
-
- if (iir & GT_CONTEXT_SWITCH_INTERRUPT)
- tasklet = true;
-
- if (iir & GT_RENDER_USER_INTERRUPT) {
- intel_engine_breadcrumbs_irq(engine);
- tasklet |= intel_engine_needs_breadcrumb_tasklet(engine);
- }
-
- if (tasklet)
- tasklet_hi_schedule(&engine->execlists.tasklet);
-}
-
-static void gen8_gt_irq_ack(struct drm_i915_private *i915,
- u32 master_ctl, u32 gt_iir[4])
-{
- void __iomem * const regs = i915->uncore.regs;
-
-#define GEN8_GT_IRQS (GEN8_GT_RCS_IRQ | \
- GEN8_GT_BCS_IRQ | \
- GEN8_GT_VCS0_IRQ | \
- GEN8_GT_VCS1_IRQ | \
- GEN8_GT_VECS_IRQ | \
- GEN8_GT_PM_IRQ | \
- GEN8_GT_GUC_IRQ)
-
- if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
- gt_iir[0] = raw_reg_read(regs, GEN8_GT_IIR(0));
- if (likely(gt_iir[0]))
- raw_reg_write(regs, GEN8_GT_IIR(0), gt_iir[0]);
- }
-
- if (master_ctl & (GEN8_GT_VCS0_IRQ | GEN8_GT_VCS1_IRQ)) {
- gt_iir[1] = raw_reg_read(regs, GEN8_GT_IIR(1));
- if (likely(gt_iir[1]))
- raw_reg_write(regs, GEN8_GT_IIR(1), gt_iir[1]);
- }
-
- if (master_ctl & (GEN8_GT_PM_IRQ | GEN8_GT_GUC_IRQ)) {
- gt_iir[2] = raw_reg_read(regs, GEN8_GT_IIR(2));
- if (likely(gt_iir[2]))
- raw_reg_write(regs, GEN8_GT_IIR(2), gt_iir[2]);
- }
-
- if (master_ctl & GEN8_GT_VECS_IRQ) {
- gt_iir[3] = raw_reg_read(regs, GEN8_GT_IIR(3));
- if (likely(gt_iir[3]))
- raw_reg_write(regs, GEN8_GT_IIR(3), gt_iir[3]);
- }
-}
-
-static void gen8_gt_irq_handler(struct drm_i915_private *i915,
- u32 master_ctl, u32 gt_iir[4])
-{
- if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
- gen8_cs_irq_handler(i915->engine[RCS0],
- gt_iir[0] >> GEN8_RCS_IRQ_SHIFT);
- gen8_cs_irq_handler(i915->engine[BCS0],
- gt_iir[0] >> GEN8_BCS_IRQ_SHIFT);
- }
-
- if (master_ctl & (GEN8_GT_VCS0_IRQ | GEN8_GT_VCS1_IRQ)) {
- gen8_cs_irq_handler(i915->engine[VCS0],
- gt_iir[1] >> GEN8_VCS0_IRQ_SHIFT);
- gen8_cs_irq_handler(i915->engine[VCS1],
- gt_iir[1] >> GEN8_VCS1_IRQ_SHIFT);
- }
-
- if (master_ctl & GEN8_GT_VECS_IRQ) {
- gen8_cs_irq_handler(i915->engine[VECS0],
- gt_iir[3] >> GEN8_VECS_IRQ_SHIFT);
- }
-
- if (master_ctl & (GEN8_GT_PM_IRQ | GEN8_GT_GUC_IRQ)) {
- gen6_rps_irq_handler(i915, gt_iir[2]);
- gen9_guc_irq_handler(i915, gt_iir[2]);
- }
-}
-
static bool gen11_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
switch (pin) {
@@ -1657,6 +1363,26 @@ static bool gen11_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
}
}
+static bool gen12_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
+{
+ switch (pin) {
+ case HPD_PORT_D:
+ return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC1);
+ case HPD_PORT_E:
+ return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC2);
+ case HPD_PORT_F:
+ return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC3);
+ case HPD_PORT_G:
+ return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC4);
+ case HPD_PORT_H:
+ return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC5);
+ case HPD_PORT_I:
+ return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC6);
+ default:
+ return false;
+ }
+}
+
static bool bxt_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
switch (pin) {
@@ -1678,6 +1404,8 @@ static bool icp_ddi_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
return val & ICP_DDIA_HPD_LONG_DETECT;
case HPD_PORT_B:
return val & ICP_DDIB_HPD_LONG_DETECT;
+ case HPD_PORT_C:
+ return val & TGP_DDIC_HPD_LONG_DETECT;
default:
return false;
}
@@ -1699,6 +1427,40 @@ static bool icp_tc_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
}
}
+static bool tgp_ddi_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
+{
+ switch (pin) {
+ case HPD_PORT_A:
+ return val & ICP_DDIA_HPD_LONG_DETECT;
+ case HPD_PORT_B:
+ return val & ICP_DDIB_HPD_LONG_DETECT;
+ case HPD_PORT_C:
+ return val & TGP_DDIC_HPD_LONG_DETECT;
+ default:
+ return false;
+ }
+}
+
+static bool tgp_tc_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
+{
+ switch (pin) {
+ case HPD_PORT_D:
+ return val & ICP_TC_HPD_LONG_DETECT(PORT_TC1);
+ case HPD_PORT_E:
+ return val & ICP_TC_HPD_LONG_DETECT(PORT_TC2);
+ case HPD_PORT_F:
+ return val & ICP_TC_HPD_LONG_DETECT(PORT_TC3);
+ case HPD_PORT_G:
+ return val & ICP_TC_HPD_LONG_DETECT(PORT_TC4);
+ case HPD_PORT_H:
+ return val & ICP_TC_HPD_LONG_DETECT(PORT_TC5);
+ case HPD_PORT_I:
+ return val & ICP_TC_HPD_LONG_DETECT(PORT_TC6);
+ default:
+ return false;
+ }
+}
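+
Each platform contributes a pin-to-register-bit predicate like the two above, dispatched through the long_pulse_detect_func typedef added earlier in this patch; intel_get_hpd_pins then folds the per-pin answers into long/short pulse masks. A standalone sketch of that table-driven dispatch (the enum values, bit positions, and helper names are invented for illustration):

	#include <stdbool.h>
	#include <stdint.h>

	enum hpd_pin { HPD_PORT_A, HPD_PORT_B, HPD_NUM_PINS };

	typedef bool (*long_pulse_detect_func)(enum hpd_pin pin, uint32_t val);

	/* One such predicate per platform, mapping pin -> "long" bit. */
	static bool example_long_detect(enum hpd_pin pin, uint32_t val)
	{
		switch (pin) {
		case HPD_PORT_A:
			return val & (1u << 0);
		case HPD_PORT_B:
			return val & (1u << 4);
		default:
			return false;
		}
	}

	static uint32_t collect_long_pulses(uint32_t reg,
					    long_pulse_detect_func detect)
	{
		uint32_t mask = 0;

		for (int pin = 0; pin < HPD_NUM_PINS; pin++)
			if (detect((enum hpd_pin)pin, reg))
				mask |= 1u << pin;
		return mask;
	}

	int main(void)
	{
		/* bits 0 and 4 set -> both pins report a long pulse */
		return collect_long_pulses(0x11, example_long_detect) == 0x3 ? 0 : 1;
	}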
+
static bool spt_port_hotplug2_long_detect(enum hpd_pin pin, u32 val)
{
switch (pin) {
@@ -1778,6 +1540,8 @@ static void intel_get_hpd_pins(struct drm_i915_private *dev_priv,
{
enum hpd_pin pin;
+ BUILD_BUG_ON(BITS_PER_TYPE(*pin_mask) < HPD_NUM_PINS);
+
for_each_hpd_pin(pin) {
if ((hpd[pin] & hotplug_trigger) == 0)
continue;
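The BUILD_BUG_ON() added above is a compile-time guard that the pin-mask type is wide enough to hold one bit per HPD pin. A minimal user-space analogue of the same check, with an assumed HPD_NUM_PINS value for illustration only:

#include <assert.h>
#include <stdint.h>

#define BITS_PER_TYPE(t)	(8 * sizeof(t))
#define HPD_NUM_PINS		15	/* assumed value, illustration only */

static void get_hpd_pins_checked(uint32_t *pin_mask)
{
	/* fails at compile time if the enum ever outgrows the mask */
	static_assert(BITS_PER_TYPE(*pin_mask) >= HPD_NUM_PINS,
		      "pin_mask has fewer bits than there are HPD pins");
	*pin_mask = 0;
}
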
@@ -1891,17 +1655,18 @@ static void i9xx_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
/* The RPS events need forcewake, so we add them to a work queue and mask their
* IMR bits until the work is done. Other interrupts can be processed without
* the work queue. */
-static void gen11_rps_irq_handler(struct drm_i915_private *i915, u32 pm_iir)
+void gen11_rps_irq_handler(struct intel_gt *gt, u32 pm_iir)
{
+ struct drm_i915_private *i915 = gt->i915;
struct intel_rps *rps = &i915->gt_pm.rps;
const u32 events = i915->pm_rps_events & pm_iir;
- lockdep_assert_held(&i915->irq_lock);
+ lockdep_assert_held(&gt->irq_lock);
if (unlikely(!events))
return;
- gen6_mask_pm_irq(i915, events);
+ gen6_gt_pm_mask_irq(gt, events);
if (!rps->interrupts_enabled)
return;
@@ -1910,18 +1675,19 @@ static void gen11_rps_irq_handler(struct drm_i915_private *i915, u32 pm_iir)
schedule_work(&rps->work);
}
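The comment above describes a classic top-half/bottom-half split. A minimal sketch of that pattern, using hypothetical names (example_gt, example_mask_pm_irq, example_unmask_pm_irq, process_rps_events) rather than the real i915 internals:

struct example_gt {
	struct work_struct rps_work;
	spinlock_t irq_lock;
	u32 pm_iir;	/* events queued for the worker */
};

static void rps_irq_top_half(struct example_gt *gt, u32 events)
{
	/* hard-irq context: mask the bits in IMR so they cannot re-fire
	 * until the worker has processed them, then defer */
	spin_lock(&gt->irq_lock);
	example_mask_pm_irq(gt, events);	/* hypothetical helper */
	gt->pm_iir |= events;
	spin_unlock(&gt->irq_lock);

	schedule_work(&gt->rps_work);
}

static void rps_work_fn(struct work_struct *work)
{
	struct example_gt *gt = container_of(work, struct example_gt, rps_work);
	u32 events;

	spin_lock_irq(&gt->irq_lock);
	events = gt->pm_iir;
	gt->pm_iir = 0;
	spin_unlock_irq(&gt->irq_lock);

	process_rps_events(gt, events);		/* process context, may sleep */

	spin_lock_irq(&gt->irq_lock);
	example_unmask_pm_irq(gt, events);	/* let them fire again */
	spin_unlock_irq(&gt->irq_lock);
}
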
-static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
+void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
{
struct intel_rps *rps = &dev_priv->gt_pm.rps;
+ struct intel_gt *gt = &dev_priv->gt;
if (pm_iir & dev_priv->pm_rps_events) {
- spin_lock(&dev_priv->irq_lock);
- gen6_mask_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
+ spin_lock(&gt->irq_lock);
+ gen6_gt_pm_mask_irq(gt, pm_iir & dev_priv->pm_rps_events);
if (rps->interrupts_enabled) {
rps->pm_iir |= pm_iir & dev_priv->pm_rps_events;
schedule_work(&rps->work);
}
- spin_unlock(&dev_priv->irq_lock);
+ spin_unlock(&gt->irq_lock);
}
if (INTEL_GEN(dev_priv) >= 8)
@@ -1934,18 +1700,6 @@ static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir);
}
-static void gen9_guc_irq_handler(struct drm_i915_private *dev_priv, u32 gt_iir)
-{
- if (gt_iir & GEN9_GUC_TO_HOST_INT_EVENT)
- intel_guc_to_host_event_handler(&dev_priv->guc);
-}
-
-static void gen11_guc_irq_handler(struct drm_i915_private *i915, u16 iir)
-{
- if (iir & GEN11_GUC_INTR_GUC2HOST)
- intel_guc_to_host_event_handler(&i915->guc);
-}
-
static void i9xx_pipestat_irq_reset(struct drm_i915_private *dev_priv)
{
enum pipe pipe;
@@ -2185,8 +1939,7 @@ static void i9xx_hpd_irq_handler(struct drm_i915_private *dev_priv,
static irqreturn_t valleyview_irq_handler(int irq, void *arg)
{
- struct drm_device *dev = arg;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = arg;
irqreturn_t ret = IRQ_NONE;
if (!intel_irqs_enabled(dev_priv))
@@ -2254,7 +2007,7 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
if (gt_iir)
- snb_gt_irq_handler(dev_priv, gt_iir);
+ gen6_gt_irq_handler(&dev_priv->gt, gt_iir);
if (pm_iir)
gen6_rps_irq_handler(dev_priv, pm_iir);
@@ -2271,8 +2024,7 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
static irqreturn_t cherryview_irq_handler(int irq, void *arg)
{
- struct drm_device *dev = arg;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = arg;
irqreturn_t ret = IRQ_NONE;
if (!intel_irqs_enabled(dev_priv))
@@ -2313,7 +2065,7 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg)
ier = I915_READ(VLV_IER);
I915_WRITE(VLV_IER, 0);
- gen8_gt_irq_ack(dev_priv, master_ctl, gt_iir);
+ gen8_gt_irq_ack(&dev_priv->gt, master_ctl, gt_iir);
if (iir & I915_DISPLAY_PORT_INTERRUPT)
hotplug_status = i9xx_hpd_irq_ack(dev_priv);
@@ -2337,7 +2089,7 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg)
I915_WRITE(VLV_IER, ier);
I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
- gen8_gt_irq_handler(dev_priv, master_ctl, gt_iir);
+ gen8_gt_irq_handler(&dev_priv->gt, master_ctl, gt_iir);
if (hotplug_status)
i9xx_hpd_irq_handler(dev_priv, hotplug_status);
@@ -2507,10 +2259,18 @@ static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
static void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir,
const u32 *pins)
{
- u32 ddi_hotplug_trigger = pch_iir & SDE_DDI_MASK_ICP;
- u32 tc_hotplug_trigger = pch_iir & SDE_TC_MASK_ICP;
+ u32 ddi_hotplug_trigger;
+ u32 tc_hotplug_trigger;
u32 pin_mask = 0, long_mask = 0;
+ if (HAS_PCH_MCC(dev_priv)) {
+ ddi_hotplug_trigger = pch_iir & SDE_DDI_MASK_TGP;
+ tc_hotplug_trigger = 0;
+ } else {
+ ddi_hotplug_trigger = pch_iir & SDE_DDI_MASK_ICP;
+ tc_hotplug_trigger = pch_iir & SDE_TC_MASK_ICP;
+ }
+
if (ddi_hotplug_trigger) {
u32 dig_hotplug_reg;
@@ -2542,6 +2302,43 @@ static void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir,
gmbus_irq_handler(dev_priv);
}
+static void tgp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
+{
+ u32 ddi_hotplug_trigger = pch_iir & SDE_DDI_MASK_TGP;
+ u32 tc_hotplug_trigger = pch_iir & SDE_TC_MASK_TGP;
+ u32 pin_mask = 0, long_mask = 0;
+
+ if (ddi_hotplug_trigger) {
+ u32 dig_hotplug_reg;
+
+ dig_hotplug_reg = I915_READ(SHOTPLUG_CTL_DDI);
+ I915_WRITE(SHOTPLUG_CTL_DDI, dig_hotplug_reg);
+
+ intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
+ ddi_hotplug_trigger,
+ dig_hotplug_reg, hpd_tgp,
+ tgp_ddi_port_hotplug_long_detect);
+ }
+
+ if (tc_hotplug_trigger) {
+ u32 dig_hotplug_reg;
+
+ dig_hotplug_reg = I915_READ(SHOTPLUG_CTL_TC);
+ I915_WRITE(SHOTPLUG_CTL_TC, dig_hotplug_reg);
+
+ intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
+ tc_hotplug_trigger,
+ dig_hotplug_reg, hpd_tgp,
+ tgp_tc_port_hotplug_long_detect);
+ }
+
+ if (pin_mask)
+ intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
+
+ if (pch_iir & SDE_GMBUS_ICP)
+ gmbus_irq_handler(dev_priv);
+}
+
static void spt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
{
u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT &
@@ -2691,8 +2488,7 @@ static void ivb_display_irq_handler(struct drm_i915_private *dev_priv,
*/
static irqreturn_t ironlake_irq_handler(int irq, void *arg)
{
- struct drm_device *dev = arg;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = arg;
u32 de_iir, gt_iir, de_ier, sde_ier = 0;
irqreturn_t ret = IRQ_NONE;
@@ -2723,9 +2519,9 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
I915_WRITE(GTIIR, gt_iir);
ret = IRQ_HANDLED;
if (INTEL_GEN(dev_priv) >= 6)
- snb_gt_irq_handler(dev_priv, gt_iir);
+ gen6_gt_irq_handler(&dev_priv->gt, gt_iir);
else
- ilk_gt_irq_handler(dev_priv, gt_iir);
+ gen5_gt_irq_handler(&dev_priv->gt, gt_iir);
}
de_iir = I915_READ(DEIIR);
@@ -2778,6 +2574,16 @@ static void gen11_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
u32 pin_mask = 0, long_mask = 0;
u32 trigger_tc = iir & GEN11_DE_TC_HOTPLUG_MASK;
u32 trigger_tbt = iir & GEN11_DE_TBT_HOTPLUG_MASK;
+ long_pulse_detect_func long_pulse_detect;
+ const u32 *hpd;
+
+ if (INTEL_GEN(dev_priv) >= 12) {
+ long_pulse_detect = gen12_port_hotplug_long_detect;
+ hpd = hpd_gen12;
+ } else {
+ long_pulse_detect = gen11_port_hotplug_long_detect;
+ hpd = hpd_gen11;
+ }
if (trigger_tc) {
u32 dig_hotplug_reg;
@@ -2786,8 +2592,7 @@ static void gen11_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
I915_WRITE(GEN11_TC_HOTPLUG_CTL, dig_hotplug_reg);
intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, trigger_tc,
- dig_hotplug_reg, hpd_gen11,
- gen11_port_hotplug_long_detect);
+ dig_hotplug_reg, hpd, long_pulse_detect);
}
if (trigger_tbt) {
@@ -2797,8 +2602,7 @@ static void gen11_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
I915_WRITE(GEN11_TBT_HOTPLUG_CTL, dig_hotplug_reg);
intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, trigger_tbt,
- dig_hotplug_reg, hpd_gen11,
- gen11_port_hotplug_long_detect);
+ dig_hotplug_reg, hpd, long_pulse_detect);
}
if (pin_mask)
@@ -2809,23 +2613,59 @@ static void gen11_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
static u32 gen8_de_port_aux_mask(struct drm_i915_private *dev_priv)
{
- u32 mask = GEN8_AUX_CHANNEL_A;
+ u32 mask;
+ if (INTEL_GEN(dev_priv) >= 12)
+ /* TODO: Add AUX entries for USBC */
+ return TGL_DE_PORT_AUX_DDIA |
+ TGL_DE_PORT_AUX_DDIB |
+ TGL_DE_PORT_AUX_DDIC;
+
+ mask = GEN8_AUX_CHANNEL_A;
if (INTEL_GEN(dev_priv) >= 9)
mask |= GEN9_AUX_CHANNEL_B |
GEN9_AUX_CHANNEL_C |
GEN9_AUX_CHANNEL_D;
- if (IS_CNL_WITH_PORT_F(dev_priv))
+ if (IS_CNL_WITH_PORT_F(dev_priv) || IS_GEN(dev_priv, 11))
mask |= CNL_AUX_CHANNEL_F;
- if (INTEL_GEN(dev_priv) >= 11)
- mask |= ICL_AUX_CHANNEL_E |
- CNL_AUX_CHANNEL_F;
+ if (IS_GEN(dev_priv, 11))
+ mask |= ICL_AUX_CHANNEL_E;
return mask;
}
+static u32 gen8_de_pipe_fault_mask(struct drm_i915_private *dev_priv)
+{
+ if (INTEL_GEN(dev_priv) >= 9)
+ return GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
+ else
+ return GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
+}
+
+static void
+gen8_de_misc_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
+{
+ bool found = false;
+
+ if (iir & GEN8_DE_MISC_GSE) {
+ intel_opregion_asle_intr(dev_priv);
+ found = true;
+ }
+
+ if (iir & GEN8_DE_EDP_PSR) {
+ u32 psr_iir = I915_READ(EDP_PSR_IIR);
+
+ intel_psr_irq_handler(dev_priv, psr_iir);
+ I915_WRITE(EDP_PSR_IIR, psr_iir);
+ found = true;
+ }
+
+ if (!found)
+ DRM_ERROR("Unexpected DE Misc interrupt\n");
+}
+
static irqreturn_t
gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
{
@@ -2836,29 +2676,12 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
if (master_ctl & GEN8_DE_MISC_IRQ) {
iir = I915_READ(GEN8_DE_MISC_IIR);
if (iir) {
- bool found = false;
-
I915_WRITE(GEN8_DE_MISC_IIR, iir);
ret = IRQ_HANDLED;
-
- if (iir & GEN8_DE_MISC_GSE) {
- intel_opregion_asle_intr(dev_priv);
- found = true;
- }
-
- if (iir & GEN8_DE_EDP_PSR) {
- u32 psr_iir = I915_READ(EDP_PSR_IIR);
-
- intel_psr_irq_handler(dev_priv, psr_iir);
- I915_WRITE(EDP_PSR_IIR, psr_iir);
- found = true;
- }
-
- if (!found)
- DRM_ERROR("Unexpected DE Misc interrupt\n");
- }
- else
+ gen8_de_misc_irq_handler(dev_priv, iir);
+ } else {
DRM_ERROR("The master control interrupt lied (DE MISC)!\n");
+ }
}
if (INTEL_GEN(dev_priv) >= 11 && (master_ctl & GEN11_DE_HPD_IRQ)) {
@@ -2938,12 +2761,7 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
if (iir & GEN8_PIPE_FIFO_UNDERRUN)
intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
- fault_errors = iir;
- if (INTEL_GEN(dev_priv) >= 9)
- fault_errors &= GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
- else
- fault_errors &= GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
-
+ fault_errors = iir & gen8_de_pipe_fault_mask(dev_priv);
if (fault_errors)
DRM_ERROR("Fault errors on pipe %c: 0x%08x\n",
pipe_name(pipe),
@@ -2962,7 +2780,9 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
I915_WRITE(SDEIIR, iir);
ret = IRQ_HANDLED;
- if (INTEL_PCH_TYPE(dev_priv) >= PCH_MCC)
+ if (INTEL_PCH_TYPE(dev_priv) >= PCH_TGP)
+ tgp_irq_handler(dev_priv, iir);
+ else if (INTEL_PCH_TYPE(dev_priv) >= PCH_MCC)
icp_irq_handler(dev_priv, iir, hpd_mcc);
else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
icp_irq_handler(dev_priv, iir, hpd_icp);
@@ -3002,7 +2822,7 @@ static inline void gen8_master_intr_enable(void __iomem * const regs)
static irqreturn_t gen8_irq_handler(int irq, void *arg)
{
- struct drm_i915_private *dev_priv = to_i915(arg);
+ struct drm_i915_private *dev_priv = arg;
void __iomem * const regs = dev_priv->uncore.regs;
u32 master_ctl;
u32 gt_iir[4];
@@ -3017,7 +2837,7 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
}
/* Find, clear, then process each source of interrupt */
- gen8_gt_irq_ack(dev_priv, master_ctl, gt_iir);
+ gen8_gt_irq_ack(&dev_priv->gt, master_ctl, gt_iir);
/* IRQs are synced during runtime_suspend, we don't require a wakeref */
if (master_ctl & ~GEN8_GT_IRQS) {
@@ -3028,140 +2848,15 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
gen8_master_intr_enable(regs);
- gen8_gt_irq_handler(dev_priv, master_ctl, gt_iir);
+ gen8_gt_irq_handler(&dev_priv->gt, master_ctl, gt_iir);
return IRQ_HANDLED;
}
static u32
-gen11_gt_engine_identity(struct drm_i915_private * const i915,
- const unsigned int bank, const unsigned int bit)
-{
- void __iomem * const regs = i915->uncore.regs;
- u32 timeout_ts;
- u32 ident;
-
- lockdep_assert_held(&i915->irq_lock);
-
- raw_reg_write(regs, GEN11_IIR_REG_SELECTOR(bank), BIT(bit));
-
- /*
- * NB: Specs do not specify how long to spin wait,
- * so we do ~100us as an educated guess.
- */
- timeout_ts = (local_clock() >> 10) + 100;
- do {
- ident = raw_reg_read(regs, GEN11_INTR_IDENTITY_REG(bank));
- } while (!(ident & GEN11_INTR_DATA_VALID) &&
- !time_after32(local_clock() >> 10, timeout_ts));
-
- if (unlikely(!(ident & GEN11_INTR_DATA_VALID))) {
- DRM_ERROR("INTR_IDENTITY_REG%u:%u 0x%08x not valid!\n",
- bank, bit, ident);
- return 0;
- }
-
- raw_reg_write(regs, GEN11_INTR_IDENTITY_REG(bank),
- GEN11_INTR_DATA_VALID);
-
- return ident;
-}
-
-static void
-gen11_other_irq_handler(struct drm_i915_private * const i915,
- const u8 instance, const u16 iir)
-{
- if (instance == OTHER_GUC_INSTANCE)
- return gen11_guc_irq_handler(i915, iir);
-
- if (instance == OTHER_GTPM_INSTANCE)
- return gen11_rps_irq_handler(i915, iir);
-
- WARN_ONCE(1, "unhandled other interrupt instance=0x%x, iir=0x%x\n",
- instance, iir);
-}
-
-static void
-gen11_engine_irq_handler(struct drm_i915_private * const i915,
- const u8 class, const u8 instance, const u16 iir)
+gen11_gu_misc_irq_ack(struct intel_gt *gt, const u32 master_ctl)
{
- struct intel_engine_cs *engine;
-
- if (instance <= MAX_ENGINE_INSTANCE)
- engine = i915->engine_class[class][instance];
- else
- engine = NULL;
-
- if (likely(engine))
- return gen8_cs_irq_handler(engine, iir);
-
- WARN_ONCE(1, "unhandled engine interrupt class=0x%x, instance=0x%x\n",
- class, instance);
-}
-
-static void
-gen11_gt_identity_handler(struct drm_i915_private * const i915,
- const u32 identity)
-{
- const u8 class = GEN11_INTR_ENGINE_CLASS(identity);
- const u8 instance = GEN11_INTR_ENGINE_INSTANCE(identity);
- const u16 intr = GEN11_INTR_ENGINE_INTR(identity);
-
- if (unlikely(!intr))
- return;
-
- if (class <= COPY_ENGINE_CLASS)
- return gen11_engine_irq_handler(i915, class, instance, intr);
-
- if (class == OTHER_CLASS)
- return gen11_other_irq_handler(i915, instance, intr);
-
- WARN_ONCE(1, "unknown interrupt class=0x%x, instance=0x%x, intr=0x%x\n",
- class, instance, intr);
-}
-
-static void
-gen11_gt_bank_handler(struct drm_i915_private * const i915,
- const unsigned int bank)
-{
- void __iomem * const regs = i915->uncore.regs;
- unsigned long intr_dw;
- unsigned int bit;
-
- lockdep_assert_held(&i915->irq_lock);
-
- intr_dw = raw_reg_read(regs, GEN11_GT_INTR_DW(bank));
-
- for_each_set_bit(bit, &intr_dw, 32) {
- const u32 ident = gen11_gt_engine_identity(i915, bank, bit);
-
- gen11_gt_identity_handler(i915, ident);
- }
-
- /* Clear must be after shared has been served for engine */
- raw_reg_write(regs, GEN11_GT_INTR_DW(bank), intr_dw);
-}
-
-static void
-gen11_gt_irq_handler(struct drm_i915_private * const i915,
- const u32 master_ctl)
-{
- unsigned int bank;
-
- spin_lock(&i915->irq_lock);
-
- for (bank = 0; bank < 2; bank++) {
- if (master_ctl & GEN11_GT_DW_IRQ(bank))
- gen11_gt_bank_handler(i915, bank);
- }
-
- spin_unlock(&i915->irq_lock);
-}
-
-static u32
-gen11_gu_misc_irq_ack(struct drm_i915_private *dev_priv, const u32 master_ctl)
-{
- void __iomem * const regs = dev_priv->uncore.regs;
+ void __iomem * const regs = gt->uncore->regs;
u32 iir;
if (!(master_ctl & GEN11_GU_MISC_IRQ))
@@ -3175,10 +2870,10 @@ gen11_gu_misc_irq_ack(struct drm_i915_private *dev_priv, const u32 master_ctl)
}
static void
-gen11_gu_misc_irq_handler(struct drm_i915_private *dev_priv, const u32 iir)
+gen11_gu_misc_irq_handler(struct intel_gt *gt, const u32 iir)
{
if (iir & GEN11_GU_MISC_GSE)
- intel_opregion_asle_intr(dev_priv);
+ intel_opregion_asle_intr(gt->i915);
}
static inline u32 gen11_master_intr_disable(void __iomem * const regs)
@@ -3201,8 +2896,9 @@ static inline void gen11_master_intr_enable(void __iomem * const regs)
static irqreturn_t gen11_irq_handler(int irq, void *arg)
{
- struct drm_i915_private * const i915 = to_i915(arg);
+ struct drm_i915_private * const i915 = arg;
void __iomem * const regs = i915->uncore.regs;
+ struct intel_gt *gt = &i915->gt;
u32 master_ctl;
u32 gu_misc_iir;
@@ -3216,7 +2912,7 @@ static irqreturn_t gen11_irq_handler(int irq, void *arg)
}
/* Find, clear, then process each source of interrupt. */
- gen11_gt_irq_handler(i915, master_ctl);
+ gen11_gt_irq_handler(gt, master_ctl);
/* IRQs are synced during runtime_suspend, we don't require a wakeref */
if (master_ctl & GEN11_DISPLAY_IRQ) {
@@ -3231,11 +2927,11 @@ static irqreturn_t gen11_irq_handler(int irq, void *arg)
enable_rpm_wakeref_asserts(&i915->runtime_pm);
}
- gu_misc_iir = gen11_gu_misc_irq_ack(i915, master_ctl);
+ gu_misc_iir = gen11_gu_misc_irq_ack(gt, master_ctl);
gen11_master_intr_enable(regs);
- gen11_gu_misc_irq_handler(i915, gu_misc_iir);
+ gen11_gu_misc_irq_handler(gt, gu_misc_iir);
return IRQ_HANDLED;
}
@@ -3243,9 +2939,10 @@ static irqreturn_t gen11_irq_handler(int irq, void *arg)
/* Called from drm generic code, passed 'crtc' which
* we use as a pipe index
*/
-static int i8xx_enable_vblank(struct drm_device *dev, unsigned int pipe)
+int i8xx_enable_vblank(struct drm_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc->dev);
+ enum pipe pipe = to_intel_crtc(crtc)->pipe;
unsigned long irqflags;
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
@@ -3255,19 +2952,20 @@ static int i8xx_enable_vblank(struct drm_device *dev, unsigned int pipe)
return 0;
}
-static int i945gm_enable_vblank(struct drm_device *dev, unsigned int pipe)
+int i945gm_enable_vblank(struct drm_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc->dev);
if (dev_priv->i945gm_vblank.enabled++ == 0)
schedule_work(&dev_priv->i945gm_vblank.work);
- return i8xx_enable_vblank(dev, pipe);
+ return i8xx_enable_vblank(crtc);
}
-static int i965_enable_vblank(struct drm_device *dev, unsigned int pipe)
+int i965_enable_vblank(struct drm_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc->dev);
+ enum pipe pipe = to_intel_crtc(crtc)->pipe;
unsigned long irqflags;
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
@@ -3278,9 +2976,10 @@ static int i965_enable_vblank(struct drm_device *dev, unsigned int pipe)
return 0;
}
-static int ironlake_enable_vblank(struct drm_device *dev, unsigned int pipe)
+int ilk_enable_vblank(struct drm_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc->dev);
+ enum pipe pipe = to_intel_crtc(crtc)->pipe;
unsigned long irqflags;
u32 bit = INTEL_GEN(dev_priv) >= 7 ?
DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);
@@ -3293,14 +2992,15 @@ static int ironlake_enable_vblank(struct drm_device *dev, unsigned int pipe)
* PSR is active as no frames are generated.
*/
if (HAS_PSR(dev_priv))
- drm_vblank_restore(dev, pipe);
+ drm_crtc_vblank_restore(crtc);
return 0;
}
-static int gen8_enable_vblank(struct drm_device *dev, unsigned int pipe)
+int bdw_enable_vblank(struct drm_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc->dev);
+ enum pipe pipe = to_intel_crtc(crtc)->pipe;
unsigned long irqflags;
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
@@ -3311,7 +3011,7 @@ static int gen8_enable_vblank(struct drm_device *dev, unsigned int pipe)
* PSR is active as no frames are generated, so check only for PSR.
*/
if (HAS_PSR(dev_priv))
- drm_vblank_restore(dev, pipe);
+ drm_crtc_vblank_restore(crtc);
return 0;
}
@@ -3319,9 +3019,10 @@ static int gen8_enable_vblank(struct drm_device *dev, unsigned int pipe)
/* Called from drm generic code, passed 'crtc' which
* we use as a pipe index
*/
-static void i8xx_disable_vblank(struct drm_device *dev, unsigned int pipe)
+void i8xx_disable_vblank(struct drm_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc->dev);
+ enum pipe pipe = to_intel_crtc(crtc)->pipe;
unsigned long irqflags;
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
@@ -3329,19 +3030,20 @@ static void i8xx_disable_vblank(struct drm_device *dev, unsigned int pipe)
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
-static void i945gm_disable_vblank(struct drm_device *dev, unsigned int pipe)
+void i945gm_disable_vblank(struct drm_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc->dev);
- i8xx_disable_vblank(dev, pipe);
+ i8xx_disable_vblank(crtc);
if (--dev_priv->i945gm_vblank.enabled == 0)
schedule_work(&dev_priv->i945gm_vblank.work);
}
-static void i965_disable_vblank(struct drm_device *dev, unsigned int pipe)
+void i965_disable_vblank(struct drm_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc->dev);
+ enum pipe pipe = to_intel_crtc(crtc)->pipe;
unsigned long irqflags;
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
@@ -3350,9 +3052,10 @@ static void i965_disable_vblank(struct drm_device *dev, unsigned int pipe)
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
-static void ironlake_disable_vblank(struct drm_device *dev, unsigned int pipe)
+void ilk_disable_vblank(struct drm_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc->dev);
+ enum pipe pipe = to_intel_crtc(crtc)->pipe;
unsigned long irqflags;
u32 bit = INTEL_GEN(dev_priv) >= 7 ?
DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);
@@ -3362,9 +3065,10 @@ static void ironlake_disable_vblank(struct drm_device *dev, unsigned int pipe)
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
-static void gen8_disable_vblank(struct drm_device *dev, unsigned int pipe)
+void bdw_disable_vblank(struct drm_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc->dev);
+ enum pipe pipe = to_intel_crtc(crtc)->pipe;
unsigned long irqflags;
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
@@ -3447,10 +3151,8 @@ static void ibx_irq_reset(struct drm_i915_private *dev_priv)
*
* This function needs to be called before interrupts are enabled.
*/
-static void ibx_irq_pre_postinstall(struct drm_device *dev)
+static void ibx_irq_pre_postinstall(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
if (HAS_PCH_NOP(dev_priv))
return;
@@ -3459,26 +3161,17 @@ static void ibx_irq_pre_postinstall(struct drm_device *dev)
POSTING_READ(SDEIER);
}
-static void gen5_gt_irq_reset(struct drm_i915_private *dev_priv)
-{
- struct intel_uncore *uncore = &dev_priv->uncore;
-
- GEN3_IRQ_RESET(uncore, GT);
- if (INTEL_GEN(dev_priv) >= 6)
- GEN3_IRQ_RESET(uncore, GEN6_PM);
-}
-
static void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
{
struct intel_uncore *uncore = &dev_priv->uncore;
if (IS_CHERRYVIEW(dev_priv))
- I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK_CHV);
+ intel_uncore_write(uncore, DPINVGTT, DPINVGTT_STATUS_MASK_CHV);
else
- I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
+ intel_uncore_write(uncore, DPINVGTT, DPINVGTT_STATUS_MASK);
i915_hotplug_interrupt_update_locked(dev_priv, 0xffffffff, 0);
- I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
+ intel_uncore_write(uncore, PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
i9xx_pipestat_irq_reset(dev_priv);
@@ -3519,33 +3212,30 @@ static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
/* drm_dma.h hooks
*/
-static void ironlake_irq_reset(struct drm_device *dev)
+static void ironlake_irq_reset(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_uncore *uncore = &dev_priv->uncore;
GEN3_IRQ_RESET(uncore, DE);
if (IS_GEN(dev_priv, 7))
- I915_WRITE(GEN7_ERR_INT, 0xffffffff);
+ intel_uncore_write(uncore, GEN7_ERR_INT, 0xffffffff);
if (IS_HASWELL(dev_priv)) {
- I915_WRITE(EDP_PSR_IMR, 0xffffffff);
- I915_WRITE(EDP_PSR_IIR, 0xffffffff);
+ intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff);
+ intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff);
}
- gen5_gt_irq_reset(dev_priv);
+ gen5_gt_irq_reset(&dev_priv->gt);
ibx_irq_reset(dev_priv);
}
-static void valleyview_irq_reset(struct drm_device *dev)
+static void valleyview_irq_reset(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
I915_WRITE(VLV_MASTER_IER, 0);
POSTING_READ(VLV_MASTER_IER);
- gen5_gt_irq_reset(dev_priv);
+ gen5_gt_irq_reset(&dev_priv->gt);
spin_lock_irq(&dev_priv->irq_lock);
if (dev_priv->display_irqs_enabled)
@@ -3553,28 +3243,17 @@ static void valleyview_irq_reset(struct drm_device *dev)
spin_unlock_irq(&dev_priv->irq_lock);
}
-static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv)
+static void gen8_irq_reset(struct drm_i915_private *dev_priv)
{
struct intel_uncore *uncore = &dev_priv->uncore;
-
- GEN8_IRQ_RESET_NDX(uncore, GT, 0);
- GEN8_IRQ_RESET_NDX(uncore, GT, 1);
- GEN8_IRQ_RESET_NDX(uncore, GT, 2);
- GEN8_IRQ_RESET_NDX(uncore, GT, 3);
-}
-
-static void gen8_irq_reset(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_uncore *uncore = &dev_priv->uncore;
int pipe;
gen8_master_intr_disable(dev_priv->uncore.regs);
- gen8_gt_irq_reset(dev_priv);
+ gen8_gt_irq_reset(&dev_priv->gt);
- I915_WRITE(EDP_PSR_IMR, 0xffffffff);
- I915_WRITE(EDP_PSR_IIR, 0xffffffff);
+ intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff);
+ intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff);
for_each_pipe(dev_priv, pipe)
if (intel_display_power_is_enabled(dev_priv,
@@ -3589,39 +3268,19 @@ static void gen8_irq_reset(struct drm_device *dev)
ibx_irq_reset(dev_priv);
}
-static void gen11_gt_irq_reset(struct drm_i915_private *dev_priv)
+static void gen11_irq_reset(struct drm_i915_private *dev_priv)
{
- /* Disable RCS, BCS, VCS and VECS class engines. */
- I915_WRITE(GEN11_RENDER_COPY_INTR_ENABLE, 0);
- I915_WRITE(GEN11_VCS_VECS_INTR_ENABLE, 0);
-
- /* Restore masks irqs on RCS, BCS, VCS and VECS engines. */
- I915_WRITE(GEN11_RCS0_RSVD_INTR_MASK, ~0);
- I915_WRITE(GEN11_BCS_RSVD_INTR_MASK, ~0);
- I915_WRITE(GEN11_VCS0_VCS1_INTR_MASK, ~0);
- I915_WRITE(GEN11_VCS2_VCS3_INTR_MASK, ~0);
- I915_WRITE(GEN11_VECS0_VECS1_INTR_MASK, ~0);
-
- I915_WRITE(GEN11_GPM_WGBOXPERF_INTR_ENABLE, 0);
- I915_WRITE(GEN11_GPM_WGBOXPERF_INTR_MASK, ~0);
- I915_WRITE(GEN11_GUC_SG_INTR_ENABLE, 0);
- I915_WRITE(GEN11_GUC_SG_INTR_MASK, ~0);
-}
-
-static void gen11_irq_reset(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_uncore *uncore = &dev_priv->uncore;
int pipe;
gen11_master_intr_disable(dev_priv->uncore.regs);
- gen11_gt_irq_reset(dev_priv);
+ gen11_gt_irq_reset(&dev_priv->gt);
- I915_WRITE(GEN11_DISPLAY_INT_CTL, 0);
+ intel_uncore_write(uncore, GEN11_DISPLAY_INT_CTL, 0);
- I915_WRITE(EDP_PSR_IMR, 0xffffffff);
- I915_WRITE(EDP_PSR_IIR, 0xffffffff);
+ intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff);
+ intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff);
for_each_pipe(dev_priv, pipe)
if (intel_display_power_is_enabled(dev_priv,
@@ -3680,18 +3339,17 @@ void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv,
spin_unlock_irq(&dev_priv->irq_lock);
/* make sure we're done processing display irqs */
- synchronize_irq(dev_priv->drm.irq);
+ intel_synchronize_irq(dev_priv);
}
-static void cherryview_irq_reset(struct drm_device *dev)
+static void cherryview_irq_reset(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_uncore *uncore = &dev_priv->uncore;
I915_WRITE(GEN8_MASTER_IRQ, 0);
POSTING_READ(GEN8_MASTER_IRQ);
- gen8_gt_irq_reset(dev_priv);
+ gen8_gt_irq_reset(&dev_priv->gt);
GEN3_IRQ_RESET(uncore, GEN8_PCU_);
@@ -3756,21 +3414,21 @@ static void ibx_hpd_irq_setup(struct drm_i915_private *dev_priv)
ibx_hpd_detection_setup(dev_priv);
}
-static void icp_hpd_detection_setup(struct drm_i915_private *dev_priv)
+static void icp_hpd_detection_setup(struct drm_i915_private *dev_priv,
+ u32 ddi_hotplug_enable_mask,
+ u32 tc_hotplug_enable_mask)
{
u32 hotplug;
hotplug = I915_READ(SHOTPLUG_CTL_DDI);
- hotplug |= ICP_DDIA_HPD_ENABLE |
- ICP_DDIB_HPD_ENABLE;
+ hotplug |= ddi_hotplug_enable_mask;
I915_WRITE(SHOTPLUG_CTL_DDI, hotplug);
- hotplug = I915_READ(SHOTPLUG_CTL_TC);
- hotplug |= ICP_TC_HPD_ENABLE(PORT_TC1) |
- ICP_TC_HPD_ENABLE(PORT_TC2) |
- ICP_TC_HPD_ENABLE(PORT_TC3) |
- ICP_TC_HPD_ENABLE(PORT_TC4);
- I915_WRITE(SHOTPLUG_CTL_TC, hotplug);
+ if (tc_hotplug_enable_mask) {
+ hotplug = I915_READ(SHOTPLUG_CTL_TC);
+ hotplug |= tc_hotplug_enable_mask;
+ I915_WRITE(SHOTPLUG_CTL_TC, hotplug);
+ }
}
static void icp_hpd_irq_setup(struct drm_i915_private *dev_priv)
@@ -3782,7 +3440,33 @@ static void icp_hpd_irq_setup(struct drm_i915_private *dev_priv)
ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
- icp_hpd_detection_setup(dev_priv);
+ icp_hpd_detection_setup(dev_priv, ICP_DDI_HPD_ENABLE_MASK,
+ ICP_TC_HPD_ENABLE_MASK);
+}
+
+static void mcc_hpd_irq_setup(struct drm_i915_private *dev_priv)
+{
+ u32 hotplug_irqs, enabled_irqs;
+
+ hotplug_irqs = SDE_DDI_MASK_TGP;
+ enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_mcc);
+
+ ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
+
+ icp_hpd_detection_setup(dev_priv, TGP_DDI_HPD_ENABLE_MASK, 0);
+}
+
+static void tgp_hpd_irq_setup(struct drm_i915_private *dev_priv)
+{
+ u32 hotplug_irqs, enabled_irqs;
+
+ hotplug_irqs = SDE_DDI_MASK_TGP | SDE_TC_MASK_TGP;
+ enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_tgp);
+
+ ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
+
+ icp_hpd_detection_setup(dev_priv, TGP_DDI_HPD_ENABLE_MASK,
+ TGP_TC_HPD_ENABLE_MASK);
}
static void gen11_hpd_detection_setup(struct drm_i915_private *dev_priv)
@@ -3807,9 +3491,11 @@ static void gen11_hpd_detection_setup(struct drm_i915_private *dev_priv)
static void gen11_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
u32 hotplug_irqs, enabled_irqs;
+ const u32 *hpd;
u32 val;
- enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_gen11);
+ hpd = INTEL_GEN(dev_priv) >= 12 ? hpd_gen12 : hpd_gen11;
+ enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd);
hotplug_irqs = GEN11_DE_TC_HOTPLUG_MASK | GEN11_DE_TBT_HOTPLUG_MASK;
val = I915_READ(GEN11_DE_HPD_IMR);
@@ -3819,7 +3505,9 @@ static void gen11_hpd_irq_setup(struct drm_i915_private *dev_priv)
gen11_hpd_detection_setup(dev_priv);
- if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
+ if (INTEL_PCH_TYPE(dev_priv) >= PCH_TGP)
+ tgp_hpd_irq_setup(dev_priv);
+ else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
icp_hpd_irq_setup(dev_priv);
}
@@ -3950,9 +3638,8 @@ static void bxt_hpd_irq_setup(struct drm_i915_private *dev_priv)
__bxt_hpd_detection_setup(dev_priv, enabled_irqs);
}
-static void ibx_irq_postinstall(struct drm_device *dev)
+static void ibx_irq_postinstall(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
u32 mask;
if (HAS_PCH_NOP(dev_priv))
@@ -3975,48 +3662,8 @@ static void ibx_irq_postinstall(struct drm_device *dev)
spt_hpd_detection_setup(dev_priv);
}
-static void gen5_gt_irq_postinstall(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_uncore *uncore = &dev_priv->uncore;
- u32 pm_irqs, gt_irqs;
-
- pm_irqs = gt_irqs = 0;
-
- dev_priv->gt_irq_mask = ~0;
- if (HAS_L3_DPF(dev_priv)) {
- /* L3 parity interrupt is always unmasked. */
- dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev_priv);
- gt_irqs |= GT_PARITY_ERROR(dev_priv);
- }
-
- gt_irqs |= GT_RENDER_USER_INTERRUPT;
- if (IS_GEN(dev_priv, 5)) {
- gt_irqs |= ILK_BSD_USER_INTERRUPT;
- } else {
- gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
- }
-
- GEN3_IRQ_INIT(uncore, GT, dev_priv->gt_irq_mask, gt_irqs);
-
- if (INTEL_GEN(dev_priv) >= 6) {
- /*
- * RPS interrupts will get enabled/disabled on demand when RPS
- * itself is enabled/disabled.
- */
- if (HAS_ENGINE(dev_priv, VECS0)) {
- pm_irqs |= PM_VEBOX_USER_INTERRUPT;
- dev_priv->pm_ier |= PM_VEBOX_USER_INTERRUPT;
- }
-
- dev_priv->pm_imr = 0xffffffff;
- GEN3_IRQ_INIT(uncore, GEN6_PM, dev_priv->pm_imr, pm_irqs);
- }
-}
-
-static int ironlake_irq_postinstall(struct drm_device *dev)
+static void ironlake_irq_postinstall(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_uncore *uncore = &dev_priv->uncore;
u32 display_mask, extra_mask;
@@ -4043,16 +3690,16 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
dev_priv->irq_mask = ~display_mask;
- ibx_irq_pre_postinstall(dev);
+ ibx_irq_pre_postinstall(dev_priv);
GEN3_IRQ_INIT(uncore, DE, dev_priv->irq_mask,
display_mask | extra_mask);
- gen5_gt_irq_postinstall(dev);
+ gen5_gt_irq_postinstall(&dev_priv->gt);
ilk_hpd_detection_setup(dev_priv);
- ibx_irq_postinstall(dev);
+ ibx_irq_postinstall(dev_priv);
if (IS_IRONLAKE_M(dev_priv)) {
/* Enable PCU event interrupts
@@ -4064,8 +3711,6 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
ilk_enable_display_irq(dev_priv, DE_PCU_EVENT);
spin_unlock_irq(&dev_priv->irq_lock);
}
-
- return 0;
}
void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
@@ -4097,11 +3742,9 @@ void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
}
-static int valleyview_irq_postinstall(struct drm_device *dev)
+static void valleyview_irq_postinstall(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
- gen5_gt_irq_postinstall(dev);
+ gen5_gt_irq_postinstall(&dev_priv->gt);
spin_lock_irq(&dev_priv->irq_lock);
if (dev_priv->display_irqs_enabled)
@@ -4110,42 +3753,6 @@ static int valleyview_irq_postinstall(struct drm_device *dev)
I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
POSTING_READ(VLV_MASTER_IER);
-
- return 0;
-}
-
-static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
-{
- struct intel_uncore *uncore = &dev_priv->uncore;
-
- /* These are interrupts we'll toggle with the ring mask register */
- u32 gt_interrupts[] = {
- (GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
- GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
- GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT |
- GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT),
-
- (GT_RENDER_USER_INTERRUPT << GEN8_VCS0_IRQ_SHIFT |
- GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS0_IRQ_SHIFT |
- GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
- GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT),
-
- 0,
-
- (GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT |
- GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT)
- };
-
- dev_priv->pm_ier = 0x0;
- dev_priv->pm_imr = ~dev_priv->pm_ier;
- GEN8_IRQ_INIT_NDX(uncore, GT, 0, ~gt_interrupts[0], gt_interrupts[0]);
- GEN8_IRQ_INIT_NDX(uncore, GT, 1, ~gt_interrupts[1], gt_interrupts[1]);
- /*
- * RPS interrupts will get enabled/disabled on demand when RPS itself
- * is enabled/disabled. Same will be the case for GuC interrupts.
- */
- GEN8_IRQ_INIT_NDX(uncore, GT, 2, dev_priv->pm_imr, dev_priv->pm_ier);
- GEN8_IRQ_INIT_NDX(uncore, GT, 3, ~gt_interrupts[3], gt_interrupts[3]);
}
static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
@@ -4218,58 +3825,22 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
}
}
-static int gen8_irq_postinstall(struct drm_device *dev)
+static void gen8_irq_postinstall(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
if (HAS_PCH_SPLIT(dev_priv))
- ibx_irq_pre_postinstall(dev);
+ ibx_irq_pre_postinstall(dev_priv);
- gen8_gt_irq_postinstall(dev_priv);
+ gen8_gt_irq_postinstall(&dev_priv->gt);
gen8_de_irq_postinstall(dev_priv);
if (HAS_PCH_SPLIT(dev_priv))
- ibx_irq_postinstall(dev);
+ ibx_irq_postinstall(dev_priv);
gen8_master_intr_enable(dev_priv->uncore.regs);
-
- return 0;
-}
-
-static void gen11_gt_irq_postinstall(struct drm_i915_private *dev_priv)
-{
- const u32 irqs = GT_RENDER_USER_INTERRUPT | GT_CONTEXT_SWITCH_INTERRUPT;
-
- BUILD_BUG_ON(irqs & 0xffff0000);
-
- /* Enable RCS, BCS, VCS and VECS class interrupts. */
- I915_WRITE(GEN11_RENDER_COPY_INTR_ENABLE, irqs << 16 | irqs);
- I915_WRITE(GEN11_VCS_VECS_INTR_ENABLE, irqs << 16 | irqs);
-
- /* Unmask irqs on RCS, BCS, VCS and VECS engines. */
- I915_WRITE(GEN11_RCS0_RSVD_INTR_MASK, ~(irqs << 16));
- I915_WRITE(GEN11_BCS_RSVD_INTR_MASK, ~(irqs << 16));
- I915_WRITE(GEN11_VCS0_VCS1_INTR_MASK, ~(irqs | irqs << 16));
- I915_WRITE(GEN11_VCS2_VCS3_INTR_MASK, ~(irqs | irqs << 16));
- I915_WRITE(GEN11_VECS0_VECS1_INTR_MASK, ~(irqs | irqs << 16));
-
- /*
- * RPS interrupts will get enabled/disabled on demand when RPS itself
- * is enabled/disabled.
- */
- dev_priv->pm_ier = 0x0;
- dev_priv->pm_imr = ~dev_priv->pm_ier;
- I915_WRITE(GEN11_GPM_WGBOXPERF_INTR_ENABLE, 0);
- I915_WRITE(GEN11_GPM_WGBOXPERF_INTR_MASK, ~0);
-
- /* Same thing for GuC interrupts */
- I915_WRITE(GEN11_GUC_SG_INTR_ENABLE, 0);
- I915_WRITE(GEN11_GUC_SG_INTR_MASK, ~0);
}
-static void icp_irq_postinstall(struct drm_device *dev)
+static void icp_irq_postinstall(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
u32 mask = SDE_GMBUS_ICP;
WARN_ON(I915_READ(SDEIER) != 0);
@@ -4279,36 +3850,38 @@ static void icp_irq_postinstall(struct drm_device *dev)
gen3_assert_iir_is_zero(&dev_priv->uncore, SDEIIR);
I915_WRITE(SDEIMR, ~mask);
- icp_hpd_detection_setup(dev_priv);
+ if (HAS_PCH_TGP(dev_priv))
+ icp_hpd_detection_setup(dev_priv, TGP_DDI_HPD_ENABLE_MASK,
+ TGP_TC_HPD_ENABLE_MASK);
+ else if (HAS_PCH_MCC(dev_priv))
+ icp_hpd_detection_setup(dev_priv, TGP_DDI_HPD_ENABLE_MASK, 0);
+ else
+ icp_hpd_detection_setup(dev_priv, ICP_DDI_HPD_ENABLE_MASK,
+ ICP_TC_HPD_ENABLE_MASK);
}
-static int gen11_irq_postinstall(struct drm_device *dev)
+static void gen11_irq_postinstall(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_uncore *uncore = &dev_priv->uncore;
u32 gu_misc_masked = GEN11_GU_MISC_GSE;
if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
- icp_irq_postinstall(dev);
+ icp_irq_postinstall(dev_priv);
- gen11_gt_irq_postinstall(dev_priv);
+ gen11_gt_irq_postinstall(&dev_priv->gt);
gen8_de_irq_postinstall(dev_priv);
GEN3_IRQ_INIT(uncore, GEN11_GU_MISC_, ~gu_misc_masked, gu_misc_masked);
I915_WRITE(GEN11_DISPLAY_INT_CTL, GEN11_DISPLAY_IRQ_ENABLE);
- gen11_master_intr_enable(dev_priv->uncore.regs);
+ gen11_master_intr_enable(uncore->regs);
POSTING_READ(GEN11_GFX_MSTR_IRQ);
-
- return 0;
}
-static int cherryview_irq_postinstall(struct drm_device *dev)
+static void cherryview_irq_postinstall(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
- gen8_gt_irq_postinstall(dev_priv);
+ gen8_gt_irq_postinstall(&dev_priv->gt);
spin_lock_irq(&dev_priv->irq_lock);
if (dev_priv->display_irqs_enabled)
@@ -4317,13 +3890,10 @@ static int cherryview_irq_postinstall(struct drm_device *dev)
I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
POSTING_READ(GEN8_MASTER_IRQ);
-
- return 0;
}
-static void i8xx_irq_reset(struct drm_device *dev)
+static void i8xx_irq_reset(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_uncore *uncore = &dev_priv->uncore;
i9xx_pipestat_irq_reset(dev_priv);
@@ -4331,9 +3901,8 @@ static void i8xx_irq_reset(struct drm_device *dev)
GEN2_IRQ_RESET(uncore);
}
-static int i8xx_irq_postinstall(struct drm_device *dev)
+static void i8xx_irq_postinstall(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_uncore *uncore = &dev_priv->uncore;
u16 enable_mask;
@@ -4362,8 +3931,6 @@ static int i8xx_irq_postinstall(struct drm_device *dev)
i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
spin_unlock_irq(&dev_priv->irq_lock);
-
- return 0;
}
static void i8xx_error_irq_ack(struct drm_i915_private *i915,
@@ -4444,8 +4011,7 @@ static void i9xx_error_irq_handler(struct drm_i915_private *dev_priv,
static irqreturn_t i8xx_irq_handler(int irq, void *arg)
{
- struct drm_device *dev = arg;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = arg;
irqreturn_t ret = IRQ_NONE;
if (!intel_irqs_enabled(dev_priv))
@@ -4488,9 +4054,8 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
return ret;
}
-static void i915_irq_reset(struct drm_device *dev)
+static void i915_irq_reset(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_uncore *uncore = &dev_priv->uncore;
if (I915_HAS_HOTPLUG(dev_priv)) {
@@ -4503,9 +4068,8 @@ static void i915_irq_reset(struct drm_device *dev)
GEN3_IRQ_RESET(uncore, GEN2_);
}
-static int i915_irq_postinstall(struct drm_device *dev)
+static void i915_irq_postinstall(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_uncore *uncore = &dev_priv->uncore;
u32 enable_mask;
@@ -4543,14 +4107,11 @@ static int i915_irq_postinstall(struct drm_device *dev)
spin_unlock_irq(&dev_priv->irq_lock);
i915_enable_asle_pipestat(dev_priv);
-
- return 0;
}
static irqreturn_t i915_irq_handler(int irq, void *arg)
{
- struct drm_device *dev = arg;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = arg;
irqreturn_t ret = IRQ_NONE;
if (!intel_irqs_enabled(dev_priv))
@@ -4601,9 +4162,8 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
return ret;
}
-static void i965_irq_reset(struct drm_device *dev)
+static void i965_irq_reset(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_uncore *uncore = &dev_priv->uncore;
i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
@@ -4614,9 +4174,8 @@ static void i965_irq_reset(struct drm_device *dev)
GEN3_IRQ_RESET(uncore, GEN2_);
}
-static int i965_irq_postinstall(struct drm_device *dev)
+static void i965_irq_postinstall(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_uncore *uncore = &dev_priv->uncore;
u32 enable_mask;
u32 error_mask;
@@ -4666,8 +4225,6 @@ static int i965_irq_postinstall(struct drm_device *dev)
spin_unlock_irq(&dev_priv->irq_lock);
i915_enable_asle_pipestat(dev_priv);
-
- return 0;
}
static void i915_hpd_irq_setup(struct drm_i915_private *dev_priv)
@@ -4697,8 +4254,7 @@ static void i915_hpd_irq_setup(struct drm_i915_private *dev_priv)
static irqreturn_t i965_irq_handler(int irq, void *arg)
{
- struct drm_device *dev = arg;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = arg;
irqreturn_t ret = IRQ_NONE;
if (!intel_irqs_enabled(dev_priv))
@@ -4775,8 +4331,9 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
for (i = 0; i < MAX_L3_SLICES; ++i)
dev_priv->l3_parity.remap_info[i] = NULL;
- if (HAS_GUC_SCHED(dev_priv) && INTEL_GEN(dev_priv) < 11)
- dev_priv->pm_guc_events = GEN9_GUC_TO_HOST_INT_EVENT;
+	/* pre-gen11 the GuC irq bits are in the upper 16 bits of the pm reg */
+ if (HAS_GT_UC(dev_priv) && INTEL_GEN(dev_priv) < 11)
+ dev_priv->gt.pm_guc_events = GUC_INTR_GUC2HOST << 16;
/* Let's track the enabled rps events */
if (IS_VALLEYVIEW(dev_priv))
@@ -4805,11 +4362,6 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
if (INTEL_GEN(dev_priv) >= 8)
rps->pm_intrmsk_mbz |= GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC;
- if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
- dev->driver->get_vblank_counter = g4x_get_vblank_counter;
- else if (INTEL_GEN(dev_priv) >= 3)
- dev->driver->get_vblank_counter = i915_get_vblank_counter;
-
dev->vblank_disable_immediate = true;
/* Most platforms treat the display irq block as an always-on
@@ -4831,86 +4383,21 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
*/
dev_priv->hotplug.hpd_short_storm_enabled = !HAS_DP_MST(dev_priv);
- dev->driver->get_vblank_timestamp = drm_calc_vbltimestamp_from_scanoutpos;
- dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
-
- if (IS_CHERRYVIEW(dev_priv)) {
- dev->driver->irq_handler = cherryview_irq_handler;
- dev->driver->irq_preinstall = cherryview_irq_reset;
- dev->driver->irq_postinstall = cherryview_irq_postinstall;
- dev->driver->irq_uninstall = cherryview_irq_reset;
- dev->driver->enable_vblank = i965_enable_vblank;
- dev->driver->disable_vblank = i965_disable_vblank;
- dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
- } else if (IS_VALLEYVIEW(dev_priv)) {
- dev->driver->irq_handler = valleyview_irq_handler;
- dev->driver->irq_preinstall = valleyview_irq_reset;
- dev->driver->irq_postinstall = valleyview_irq_postinstall;
- dev->driver->irq_uninstall = valleyview_irq_reset;
- dev->driver->enable_vblank = i965_enable_vblank;
- dev->driver->disable_vblank = i965_disable_vblank;
- dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
- } else if (INTEL_GEN(dev_priv) >= 11) {
- dev->driver->irq_handler = gen11_irq_handler;
- dev->driver->irq_preinstall = gen11_irq_reset;
- dev->driver->irq_postinstall = gen11_irq_postinstall;
- dev->driver->irq_uninstall = gen11_irq_reset;
- dev->driver->enable_vblank = gen8_enable_vblank;
- dev->driver->disable_vblank = gen8_disable_vblank;
- dev_priv->display.hpd_irq_setup = gen11_hpd_irq_setup;
- } else if (INTEL_GEN(dev_priv) >= 8) {
- dev->driver->irq_handler = gen8_irq_handler;
- dev->driver->irq_preinstall = gen8_irq_reset;
- dev->driver->irq_postinstall = gen8_irq_postinstall;
- dev->driver->irq_uninstall = gen8_irq_reset;
- dev->driver->enable_vblank = gen8_enable_vblank;
- dev->driver->disable_vblank = gen8_disable_vblank;
- if (IS_GEN9_LP(dev_priv))
+ if (HAS_GMCH(dev_priv)) {
+ if (I915_HAS_HOTPLUG(dev_priv))
+ dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
+ } else {
+ if (HAS_PCH_MCC(dev_priv))
+ /* EHL doesn't need most of gen11_hpd_irq_setup */
+ dev_priv->display.hpd_irq_setup = mcc_hpd_irq_setup;
+ else if (INTEL_GEN(dev_priv) >= 11)
+ dev_priv->display.hpd_irq_setup = gen11_hpd_irq_setup;
+ else if (IS_GEN9_LP(dev_priv))
dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup;
else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT)
dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup;
else
dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
- } else if (HAS_PCH_SPLIT(dev_priv)) {
- dev->driver->irq_handler = ironlake_irq_handler;
- dev->driver->irq_preinstall = ironlake_irq_reset;
- dev->driver->irq_postinstall = ironlake_irq_postinstall;
- dev->driver->irq_uninstall = ironlake_irq_reset;
- dev->driver->enable_vblank = ironlake_enable_vblank;
- dev->driver->disable_vblank = ironlake_disable_vblank;
- dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
- } else {
- if (IS_GEN(dev_priv, 2)) {
- dev->driver->irq_preinstall = i8xx_irq_reset;
- dev->driver->irq_postinstall = i8xx_irq_postinstall;
- dev->driver->irq_handler = i8xx_irq_handler;
- dev->driver->irq_uninstall = i8xx_irq_reset;
- dev->driver->enable_vblank = i8xx_enable_vblank;
- dev->driver->disable_vblank = i8xx_disable_vblank;
- } else if (IS_I945GM(dev_priv)) {
- dev->driver->irq_preinstall = i915_irq_reset;
- dev->driver->irq_postinstall = i915_irq_postinstall;
- dev->driver->irq_uninstall = i915_irq_reset;
- dev->driver->irq_handler = i915_irq_handler;
- dev->driver->enable_vblank = i945gm_enable_vblank;
- dev->driver->disable_vblank = i945gm_disable_vblank;
- } else if (IS_GEN(dev_priv, 3)) {
- dev->driver->irq_preinstall = i915_irq_reset;
- dev->driver->irq_postinstall = i915_irq_postinstall;
- dev->driver->irq_uninstall = i915_irq_reset;
- dev->driver->irq_handler = i915_irq_handler;
- dev->driver->enable_vblank = i8xx_enable_vblank;
- dev->driver->disable_vblank = i8xx_disable_vblank;
- } else {
- dev->driver->irq_preinstall = i965_irq_reset;
- dev->driver->irq_postinstall = i965_irq_postinstall;
- dev->driver->irq_uninstall = i965_irq_reset;
- dev->driver->irq_handler = i965_irq_handler;
- dev->driver->enable_vblank = i965_enable_vblank;
- dev->driver->disable_vblank = i965_disable_vblank;
- }
- if (I915_HAS_HOTPLUG(dev_priv))
- dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
}
}
@@ -4931,6 +4418,75 @@ void intel_irq_fini(struct drm_i915_private *i915)
kfree(i915->l3_parity.remap_info[i]);
}
+static irq_handler_t intel_irq_handler(struct drm_i915_private *dev_priv)
+{
+ if (HAS_GMCH(dev_priv)) {
+ if (IS_CHERRYVIEW(dev_priv))
+ return cherryview_irq_handler;
+ else if (IS_VALLEYVIEW(dev_priv))
+ return valleyview_irq_handler;
+ else if (IS_GEN(dev_priv, 4))
+ return i965_irq_handler;
+ else if (IS_GEN(dev_priv, 3))
+ return i915_irq_handler;
+ else
+ return i8xx_irq_handler;
+ } else {
+ if (INTEL_GEN(dev_priv) >= 11)
+ return gen11_irq_handler;
+ else if (INTEL_GEN(dev_priv) >= 8)
+ return gen8_irq_handler;
+ else
+ return ironlake_irq_handler;
+ }
+}
+
+static void intel_irq_reset(struct drm_i915_private *dev_priv)
+{
+ if (HAS_GMCH(dev_priv)) {
+ if (IS_CHERRYVIEW(dev_priv))
+ cherryview_irq_reset(dev_priv);
+ else if (IS_VALLEYVIEW(dev_priv))
+ valleyview_irq_reset(dev_priv);
+ else if (IS_GEN(dev_priv, 4))
+ i965_irq_reset(dev_priv);
+ else if (IS_GEN(dev_priv, 3))
+ i915_irq_reset(dev_priv);
+ else
+ i8xx_irq_reset(dev_priv);
+ } else {
+ if (INTEL_GEN(dev_priv) >= 11)
+ gen11_irq_reset(dev_priv);
+ else if (INTEL_GEN(dev_priv) >= 8)
+ gen8_irq_reset(dev_priv);
+ else
+ ironlake_irq_reset(dev_priv);
+ }
+}
+
+static void intel_irq_postinstall(struct drm_i915_private *dev_priv)
+{
+ if (HAS_GMCH(dev_priv)) {
+ if (IS_CHERRYVIEW(dev_priv))
+ cherryview_irq_postinstall(dev_priv);
+ else if (IS_VALLEYVIEW(dev_priv))
+ valleyview_irq_postinstall(dev_priv);
+ else if (IS_GEN(dev_priv, 4))
+ i965_irq_postinstall(dev_priv);
+ else if (IS_GEN(dev_priv, 3))
+ i915_irq_postinstall(dev_priv);
+ else
+ i8xx_irq_postinstall(dev_priv);
+ } else {
+ if (INTEL_GEN(dev_priv) >= 11)
+ gen11_irq_postinstall(dev_priv);
+ else if (INTEL_GEN(dev_priv) >= 8)
+ gen8_irq_postinstall(dev_priv);
+ else
+ ironlake_irq_postinstall(dev_priv);
+ }
+}
+
/**
* intel_irq_install - enables the hardware interrupt
* @dev_priv: i915 device instance
@@ -4944,6 +4500,9 @@ void intel_irq_fini(struct drm_i915_private *i915)
*/
int intel_irq_install(struct drm_i915_private *dev_priv)
{
+ int irq = dev_priv->drm.pdev->irq;
+ int ret;
+
/*
* We enable some interrupt sources in our postinstall hooks, so mark
* interrupts as enabled _before_ actually enabling them to avoid
@@ -4951,7 +4510,20 @@ int intel_irq_install(struct drm_i915_private *dev_priv)
*/
dev_priv->runtime_pm.irqs_enabled = true;
- return drm_irq_install(&dev_priv->drm, dev_priv->drm.pdev->irq);
+ dev_priv->drm.irq_enabled = true;
+
+ intel_irq_reset(dev_priv);
+
+ ret = request_irq(irq, intel_irq_handler(dev_priv),
+ IRQF_SHARED, DRIVER_NAME, dev_priv);
+ if (ret < 0) {
+ dev_priv->drm.irq_enabled = false;
+ return ret;
+ }
+
+ intel_irq_postinstall(dev_priv);
+
+ return ret;
}
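A hypothetical caller of the new install path, e.g. from a driver load sequence; the irq_enabled guard in intel_irq_uninstall() below makes a double uninstall during error unwinding harmless (the FIXME there notes exactly that case). example_finish_load is an illustrative stand-in:

static int example_driver_load(struct drm_i915_private *i915)
{
	int err;

	err = intel_irq_install(i915);
	if (err)
		return err;

	err = example_finish_load(i915);	/* hypothetical */
	if (err)
		intel_irq_uninstall(i915);	/* reset + free_irq */

	return err;
}
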
/**
@@ -4963,7 +4535,23 @@ int intel_irq_install(struct drm_i915_private *dev_priv)
*/
void intel_irq_uninstall(struct drm_i915_private *dev_priv)
{
- drm_irq_uninstall(&dev_priv->drm);
+ int irq = dev_priv->drm.pdev->irq;
+
+ /*
+ * FIXME we can get called twice during driver load
+ * error handling due to intel_modeset_cleanup()
+ * calling us out of sequence. Would be nice if
+ * it didn't do that...
+ */
+ if (!dev_priv->drm.irq_enabled)
+ return;
+
+ dev_priv->drm.irq_enabled = false;
+
+ intel_irq_reset(dev_priv);
+
+ free_irq(irq, dev_priv);
+
intel_hpd_cancel_work(dev_priv);
dev_priv->runtime_pm.irqs_enabled = false;
}
@@ -4977,9 +4565,9 @@ void intel_irq_uninstall(struct drm_i915_private *dev_priv)
*/
void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
{
- dev_priv->drm.driver->irq_uninstall(&dev_priv->drm);
+ intel_irq_reset(dev_priv);
dev_priv->runtime_pm.irqs_enabled = false;
- synchronize_irq(dev_priv->drm.irq);
+ intel_synchronize_irq(dev_priv);
}
/**
@@ -4992,6 +4580,20 @@ void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv)
{
dev_priv->runtime_pm.irqs_enabled = true;
- dev_priv->drm.driver->irq_preinstall(&dev_priv->drm);
- dev_priv->drm.driver->irq_postinstall(&dev_priv->drm);
+ intel_irq_reset(dev_priv);
+ intel_irq_postinstall(dev_priv);
+}
+
+bool intel_irqs_enabled(struct drm_i915_private *dev_priv)
+{
+ /*
+	 * Interrupts are only uninstalled at unload and VT switch, so
+	 * this is the only thing we need to check.
+ */
+ return dev_priv->runtime_pm.irqs_enabled;
+}
+
+void intel_synchronize_irq(struct drm_i915_private *i915)
+{
+ synchronize_irq(i915->drm.pdev->irq);
}
diff --git a/drivers/gpu/drm/i915/i915_irq.h b/drivers/gpu/drm/i915/i915_irq.h
index cb25dd213308..8e7e6071777e 100644
--- a/drivers/gpu/drm/i915/i915_irq.h
+++ b/drivers/gpu/drm/i915/i915_irq.h
@@ -6,15 +6,27 @@
#ifndef __I915_IRQ_H__
#define __I915_IRQ_H__
+#include <linux/ktime.h>
#include <linux/types.h>
-#include "i915_drv.h"
+#include "display/intel_display.h"
+#include "i915_reg.h"
+struct drm_crtc;
+struct drm_device;
+struct drm_display_mode;
struct drm_i915_private;
struct intel_crtc;
+struct intel_gt;
+struct intel_guc;
+struct intel_uncore;
+
+void gen11_rps_irq_handler(struct intel_gt *gt, u32 pm_iir);
+void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);
-extern void intel_irq_init(struct drm_i915_private *dev_priv);
-extern void intel_irq_fini(struct drm_i915_private *dev_priv);
+void intel_irq_init(struct drm_i915_private *dev_priv);
+void intel_irq_fini(struct drm_i915_private *dev_priv);
int intel_irq_install(struct drm_i915_private *dev_priv);
void intel_irq_uninstall(struct drm_i915_private *dev_priv);
@@ -77,41 +89,89 @@ ibx_disable_display_interrupt(struct drm_i915_private *dev_priv, u32 bits)
void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, u32 mask);
void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, u32 mask);
-void gen6_mask_pm_irq(struct drm_i915_private *dev_priv, u32 mask);
-void gen6_unmask_pm_irq(struct drm_i915_private *dev_priv, u32 mask);
void gen11_reset_rps_interrupts(struct drm_i915_private *dev_priv);
void gen6_reset_rps_interrupts(struct drm_i915_private *dev_priv);
void gen6_enable_rps_interrupts(struct drm_i915_private *dev_priv);
void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv);
void gen6_rps_reset_ei(struct drm_i915_private *dev_priv);
-
-static inline u32 gen6_sanitize_rps_pm_mask(const struct drm_i915_private *i915,
- u32 mask)
-{
- return mask & ~i915->gt_pm.rps.pm_intrmsk_mbz;
-}
+u32 gen6_sanitize_rps_pm_mask(const struct drm_i915_private *i915, u32 mask);
void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv);
void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv);
-static inline bool intel_irqs_enabled(struct drm_i915_private *dev_priv)
-{
- /*
- * We only use drm_irq_uninstall() at unload and VT switch, so
- * this is the only thing we need to check.
- */
- return dev_priv->runtime_pm.irqs_enabled;
-}
+bool intel_irqs_enabled(struct drm_i915_private *dev_priv);
+void intel_synchronize_irq(struct drm_i915_private *i915);
int intel_get_crtc_scanline(struct intel_crtc *crtc);
void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
u8 pipe_mask);
void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv,
u8 pipe_mask);
-void gen9_reset_guc_interrupts(struct drm_i915_private *dev_priv);
-void gen9_enable_guc_interrupts(struct drm_i915_private *dev_priv);
-void gen9_disable_guc_interrupts(struct drm_i915_private *dev_priv);
-void gen11_reset_guc_interrupts(struct drm_i915_private *i915);
-void gen11_enable_guc_interrupts(struct drm_i915_private *i915);
-void gen11_disable_guc_interrupts(struct drm_i915_private *i915);
+void gen9_reset_guc_interrupts(struct intel_guc *guc);
+void gen9_enable_guc_interrupts(struct intel_guc *guc);
+void gen9_disable_guc_interrupts(struct intel_guc *guc);
+void gen11_reset_guc_interrupts(struct intel_guc *guc);
+void gen11_enable_guc_interrupts(struct intel_guc *guc);
+void gen11_disable_guc_interrupts(struct intel_guc *guc);
+
+bool i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
+ bool in_vblank_irq, int *vpos, int *hpos,
+ ktime_t *stime, ktime_t *etime,
+ const struct drm_display_mode *mode);
+
+u32 i915_get_vblank_counter(struct drm_crtc *crtc);
+u32 g4x_get_vblank_counter(struct drm_crtc *crtc);
+
+int i8xx_enable_vblank(struct drm_crtc *crtc);
+int i945gm_enable_vblank(struct drm_crtc *crtc);
+int i965_enable_vblank(struct drm_crtc *crtc);
+int ilk_enable_vblank(struct drm_crtc *crtc);
+int bdw_enable_vblank(struct drm_crtc *crtc);
+void i8xx_disable_vblank(struct drm_crtc *crtc);
+void i945gm_disable_vblank(struct drm_crtc *crtc);
+void i965_disable_vblank(struct drm_crtc *crtc);
+void ilk_disable_vblank(struct drm_crtc *crtc);
+void bdw_disable_vblank(struct drm_crtc *crtc);
+
+void gen2_irq_reset(struct intel_uncore *uncore);
+void gen3_irq_reset(struct intel_uncore *uncore, i915_reg_t imr,
+ i915_reg_t iir, i915_reg_t ier);
+
+void gen2_irq_init(struct intel_uncore *uncore,
+ u32 imr_val, u32 ier_val);
+void gen3_irq_init(struct intel_uncore *uncore,
+ i915_reg_t imr, u32 imr_val,
+ i915_reg_t ier, u32 ier_val,
+ i915_reg_t iir);
+
+#define GEN8_IRQ_RESET_NDX(uncore, type, which) \
+({ \
+ unsigned int which_ = which; \
+ gen3_irq_reset((uncore), GEN8_##type##_IMR(which_), \
+ GEN8_##type##_IIR(which_), GEN8_##type##_IER(which_)); \
+})
+
+#define GEN3_IRQ_RESET(uncore, type) \
+ gen3_irq_reset((uncore), type##IMR, type##IIR, type##IER)
+
+#define GEN2_IRQ_RESET(uncore) \
+ gen2_irq_reset(uncore)
+
+#define GEN8_IRQ_INIT_NDX(uncore, type, which, imr_val, ier_val) \
+({ \
+ unsigned int which_ = which; \
+ gen3_irq_init((uncore), \
+ GEN8_##type##_IMR(which_), imr_val, \
+ GEN8_##type##_IER(which_), ier_val, \
+ GEN8_##type##_IIR(which_)); \
+})
+
+#define GEN3_IRQ_INIT(uncore, type, imr_val, ier_val) \
+ gen3_irq_init((uncore), \
+ type##IMR, imr_val, \
+ type##IER, ier_val, \
+ type##IIR)
+
+#define GEN2_IRQ_INIT(uncore, imr_val, ier_val) \
+ gen2_irq_init((uncore), imr_val, ier_val)
#endif /* __I915_IRQ_H__ */
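Aside: the GEN8_*/GEN3_* helpers above lean on token pasting — the type argument is glued onto the IMR/IIR/IER suffixes to name the per-block registers. A minimal, self-contained sketch of the pattern; the DEMO_* names are stand-ins, not the real i915 registers:

#include <stdio.h>

/* Hypothetical stand-in "registers"; in i915 these would be i915_reg_t
 * values such as GEN8_GT_IMR(which) from i915_reg.h. */
#define DEMO_GT_IMR 0x100
#define DEMO_GT_IIR 0x104
#define DEMO_GT_IER 0x108

static void demo_irq_reset(unsigned int imr, unsigned int iir, unsigned int ier)
{
	printf("reset imr=%#x iir=%#x ier=%#x\n", imr, iir, ier);
}

/* DEMO_##type##_IMR pastes the block name onto the register suffix, so
 * DEMO_IRQ_RESET(GT) expands to demo_irq_reset(DEMO_GT_IMR, ...). */
#define DEMO_IRQ_RESET(type) \
	demo_irq_reset(DEMO_##type##_IMR, DEMO_##type##_IIR, DEMO_##type##_IER)

int main(void)
{
	DEMO_IRQ_RESET(GT);
	return 0;
}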
diff --git a/drivers/gpu/drm/i915/i915_memcpy.c b/drivers/gpu/drm/i915/i915_memcpy.c
index 79f8ec756362..07b04b0acb77 100644
--- a/drivers/gpu/drm/i915/i915_memcpy.c
+++ b/drivers/gpu/drm/i915/i915_memcpy.c
@@ -25,7 +25,7 @@
#include <linux/kernel.h>
#include <asm/fpu/api.h>
-#include "i915_drv.h"
+#include "i915_memcpy.h"
static DEFINE_STATIC_KEY_FALSE(has_movntdqa);
diff --git a/drivers/gpu/drm/i915/i915_memcpy.h b/drivers/gpu/drm/i915/i915_memcpy.h
new file mode 100644
index 000000000000..970d84b16987
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_memcpy.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __I915_MEMCPY_H__
+#define __I915_MEMCPY_H__
+
+#include <linux/types.h>
+
+struct drm_i915_private;
+
+void i915_memcpy_init_early(struct drm_i915_private *i915);
+bool i915_memcpy_from_wc(void *dst, const void *src, unsigned long len);
+
+/* The movntdqa instructions used for memcpy-from-wc require 16-byte alignment,
+ * as well as SSE4.1 support. i915_memcpy_from_wc() will report if it cannot
+ * perform the operation. To check beforehand, pass in the parameters to
+ * i915_can_memcpy_from_wc() - since we only care about the low 4 bits,
+ * you only need to pass in the minor offsets; page-aligned pointers are
+ * always valid.
+ *
+ * To check only for SSE4.1, in the foreknowledge that the future use
+ * will be correctly aligned, use i915_has_memcpy_from_wc().
+ */
+#define i915_can_memcpy_from_wc(dst, src, len) \
+ i915_memcpy_from_wc((void *)((unsigned long)(dst) | (unsigned long)(src) | (len)), NULL, 0)
+
+#define i915_has_memcpy_from_wc() \
+ i915_memcpy_from_wc(NULL, NULL, 0)
+
+#endif /* __I915_MEMCPY_H__ */
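A sketch of why the i915_can_memcpy_from_wc() macro works: OR-ing dst, src and len folds the three 16-byte alignment checks into a single test of the low 4 bits. This models only the alignment half; the SSE4.1 detection happens inside the real i915_memcpy_from_wc() and is omitted here:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustration only: alignment part of the check. */
static bool can_memcpy_from_wc(const void *dst, const void *src,
			       unsigned long len)
{
	/* Any set bit in the low 4 bits of dst, src or len survives the OR. */
	return (((uintptr_t)dst | (uintptr_t)src | len) & 15) == 0;
}

int main(void)
{
	_Alignas(16) char buf[64];

	printf("%d\n", can_memcpy_from_wc(buf, buf + 16, 32)); /* 1 */
	printf("%d\n", can_memcpy_from_wc(buf, buf + 8, 32));  /* 0: src */
	printf("%d\n", can_memcpy_from_wc(buf, buf + 16, 24)); /* 0: len */
	return 0;
}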
diff --git a/drivers/gpu/drm/i915/i915_mm.c b/drivers/gpu/drm/i915/i915_mm.c
index c23bb29e6d3e..318562ce64c0 100644
--- a/drivers/gpu/drm/i915/i915_mm.c
+++ b/drivers/gpu/drm/i915/i915_mm.c
@@ -63,9 +63,8 @@ int remap_io_mapping(struct vm_area_struct *vma,
struct remap_pfn r;
int err;
- GEM_BUG_ON((vma->vm_flags &
- (VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP)) !=
- (VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP));
+#define EXPECTED_FLAGS (VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP)
+ GEM_BUG_ON((vma->vm_flags & EXPECTED_FLAGS) != EXPECTED_FLAGS);
/* We rely on prevalidation of the io-mapping to skip track_pfn(). */
r.mm = vma->vm_mm;
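The EXPECTED_FLAGS assertion above uses the standard all-bits-set idiom: mask the flags with the required set and compare back, so any missing bit fails the test. A tiny model, with hypothetical flag values (the real VM_* constants live in mm.h):

#include <stdbool.h>
#include <stdio.h>

#define VM_PFNMAP	0x1	/* illustrative values only */
#define VM_DONTEXPAND	0x2
#define VM_DONTDUMP	0x4
#define EXPECTED_FLAGS	(VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP)

static bool has_all(unsigned long flags)
{
	/* Equal only if every bit of EXPECTED_FLAGS is present in flags. */
	return (flags & EXPECTED_FLAGS) == EXPECTED_FLAGS;
}

int main(void)
{
	printf("%d\n", has_all(VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP)); /* 1 */
	printf("%d\n", has_all(VM_PFNMAP | VM_DONTDUMP));                 /* 0 */
	return 0;
}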
diff --git a/drivers/gpu/drm/i915/i915_oa_bdw.h b/drivers/gpu/drm/i915/i915_oa_bdw.h
deleted file mode 100644
index 0e667f1a8aa1..000000000000
--- a/drivers/gpu/drm/i915/i915_oa_bdw.h
+++ /dev/null
@@ -1,15 +0,0 @@
-/*
- * SPDX-License-Identifier: MIT
- *
- * Copyright © 2018 Intel Corporation
- *
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- */
-
-#ifndef __I915_OA_BDW_H__
-#define __I915_OA_BDW_H__
-
-extern void i915_perf_load_test_config_bdw(struct drm_i915_private *dev_priv);
-
-#endif
diff --git a/drivers/gpu/drm/i915/i915_oa_bxt.h b/drivers/gpu/drm/i915/i915_oa_bxt.h
deleted file mode 100644
index 679e92cf4f1d..000000000000
--- a/drivers/gpu/drm/i915/i915_oa_bxt.h
+++ /dev/null
@@ -1,15 +0,0 @@
-/*
- * SPDX-License-Identifier: MIT
- *
- * Copyright © 2018 Intel Corporation
- *
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- */
-
-#ifndef __I915_OA_BXT_H__
-#define __I915_OA_BXT_H__
-
-extern void i915_perf_load_test_config_bxt(struct drm_i915_private *dev_priv);
-
-#endif
diff --git a/drivers/gpu/drm/i915/i915_oa_cflgt2.h b/drivers/gpu/drm/i915/i915_oa_cflgt2.h
deleted file mode 100644
index 4d6025559bbe..000000000000
--- a/drivers/gpu/drm/i915/i915_oa_cflgt2.h
+++ /dev/null
@@ -1,15 +0,0 @@
-/*
- * SPDX-License-Identifier: MIT
- *
- * Copyright © 2018 Intel Corporation
- *
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- */
-
-#ifndef __I915_OA_CFLGT2_H__
-#define __I915_OA_CFLGT2_H__
-
-extern void i915_perf_load_test_config_cflgt2(struct drm_i915_private *dev_priv);
-
-#endif
diff --git a/drivers/gpu/drm/i915/i915_oa_cflgt3.h b/drivers/gpu/drm/i915/i915_oa_cflgt3.h
deleted file mode 100644
index 0697f4077402..000000000000
--- a/drivers/gpu/drm/i915/i915_oa_cflgt3.h
+++ /dev/null
@@ -1,15 +0,0 @@
-/*
- * SPDX-License-Identifier: MIT
- *
- * Copyright © 2018 Intel Corporation
- *
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- */
-
-#ifndef __I915_OA_CFLGT3_H__
-#define __I915_OA_CFLGT3_H__
-
-extern void i915_perf_load_test_config_cflgt3(struct drm_i915_private *dev_priv);
-
-#endif
diff --git a/drivers/gpu/drm/i915/i915_oa_chv.h b/drivers/gpu/drm/i915/i915_oa_chv.h
deleted file mode 100644
index 0986eae3135f..000000000000
--- a/drivers/gpu/drm/i915/i915_oa_chv.h
+++ /dev/null
@@ -1,15 +0,0 @@
-/*
- * SPDX-License-Identifier: MIT
- *
- * Copyright © 2018 Intel Corporation
- *
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- */
-
-#ifndef __I915_OA_CHV_H__
-#define __I915_OA_CHV_H__
-
-extern void i915_perf_load_test_config_chv(struct drm_i915_private *dev_priv);
-
-#endif
diff --git a/drivers/gpu/drm/i915/i915_oa_cnl.h b/drivers/gpu/drm/i915/i915_oa_cnl.h
deleted file mode 100644
index e830a406aff2..000000000000
--- a/drivers/gpu/drm/i915/i915_oa_cnl.h
+++ /dev/null
@@ -1,15 +0,0 @@
-/*
- * SPDX-License-Identifier: MIT
- *
- * Copyright © 2018 Intel Corporation
- *
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- */
-
-#ifndef __I915_OA_CNL_H__
-#define __I915_OA_CNL_H__
-
-extern void i915_perf_load_test_config_cnl(struct drm_i915_private *dev_priv);
-
-#endif
diff --git a/drivers/gpu/drm/i915/i915_oa_glk.h b/drivers/gpu/drm/i915/i915_oa_glk.h
deleted file mode 100644
index 06dedf991edb..000000000000
--- a/drivers/gpu/drm/i915/i915_oa_glk.h
+++ /dev/null
@@ -1,15 +0,0 @@
-/*
- * SPDX-License-Identifier: MIT
- *
- * Copyright © 2018 Intel Corporation
- *
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- */
-
-#ifndef __I915_OA_GLK_H__
-#define __I915_OA_GLK_H__
-
-extern void i915_perf_load_test_config_glk(struct drm_i915_private *dev_priv);
-
-#endif
diff --git a/drivers/gpu/drm/i915/i915_oa_hsw.h b/drivers/gpu/drm/i915/i915_oa_hsw.h
deleted file mode 100644
index 3d0c870cd0bd..000000000000
--- a/drivers/gpu/drm/i915/i915_oa_hsw.h
+++ /dev/null
@@ -1,15 +0,0 @@
-/*
- * SPDX-License-Identifier: MIT
- *
- * Copyright © 2018 Intel Corporation
- *
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- */
-
-#ifndef __I915_OA_HSW_H__
-#define __I915_OA_HSW_H__
-
-extern void i915_perf_load_test_config_hsw(struct drm_i915_private *dev_priv);
-
-#endif
diff --git a/drivers/gpu/drm/i915/i915_oa_icl.h b/drivers/gpu/drm/i915/i915_oa_icl.h
deleted file mode 100644
index 24eaa97d61ba..000000000000
--- a/drivers/gpu/drm/i915/i915_oa_icl.h
+++ /dev/null
@@ -1,15 +0,0 @@
-/*
- * SPDX-License-Identifier: MIT
- *
- * Copyright © 2018 Intel Corporation
- *
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- */
-
-#ifndef __I915_OA_ICL_H__
-#define __I915_OA_ICL_H__
-
-extern void i915_perf_load_test_config_icl(struct drm_i915_private *dev_priv);
-
-#endif
diff --git a/drivers/gpu/drm/i915/i915_oa_kblgt2.h b/drivers/gpu/drm/i915/i915_oa_kblgt2.h
deleted file mode 100644
index a55398a904de..000000000000
--- a/drivers/gpu/drm/i915/i915_oa_kblgt2.h
+++ /dev/null
@@ -1,15 +0,0 @@
-/*
- * SPDX-License-Identifier: MIT
- *
- * Copyright © 2018 Intel Corporation
- *
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- */
-
-#ifndef __I915_OA_KBLGT2_H__
-#define __I915_OA_KBLGT2_H__
-
-extern void i915_perf_load_test_config_kblgt2(struct drm_i915_private *dev_priv);
-
-#endif
diff --git a/drivers/gpu/drm/i915/i915_oa_kblgt3.h b/drivers/gpu/drm/i915/i915_oa_kblgt3.h
deleted file mode 100644
index 3ddd3483b7cc..000000000000
--- a/drivers/gpu/drm/i915/i915_oa_kblgt3.h
+++ /dev/null
@@ -1,15 +0,0 @@
-/*
- * SPDX-License-Identifier: MIT
- *
- * Copyright © 2018 Intel Corporation
- *
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- */
-
-#ifndef __I915_OA_KBLGT3_H__
-#define __I915_OA_KBLGT3_H__
-
-extern void i915_perf_load_test_config_kblgt3(struct drm_i915_private *dev_priv);
-
-#endif
diff --git a/drivers/gpu/drm/i915/i915_oa_sklgt2.h b/drivers/gpu/drm/i915/i915_oa_sklgt2.h
deleted file mode 100644
index be6256037239..000000000000
--- a/drivers/gpu/drm/i915/i915_oa_sklgt2.h
+++ /dev/null
@@ -1,15 +0,0 @@
-/*
- * SPDX-License-Identifier: MIT
- *
- * Copyright © 2018 Intel Corporation
- *
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- */
-
-#ifndef __I915_OA_SKLGT2_H__
-#define __I915_OA_SKLGT2_H__
-
-extern void i915_perf_load_test_config_sklgt2(struct drm_i915_private *dev_priv);
-
-#endif
diff --git a/drivers/gpu/drm/i915/i915_oa_sklgt3.h b/drivers/gpu/drm/i915/i915_oa_sklgt3.h
deleted file mode 100644
index 650beb068e56..000000000000
--- a/drivers/gpu/drm/i915/i915_oa_sklgt3.h
+++ /dev/null
@@ -1,15 +0,0 @@
-/*
- * SPDX-License-Identifier: MIT
- *
- * Copyright © 2018 Intel Corporation
- *
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- */
-
-#ifndef __I915_OA_SKLGT3_H__
-#define __I915_OA_SKLGT3_H__
-
-extern void i915_perf_load_test_config_sklgt3(struct drm_i915_private *dev_priv);
-
-#endif
diff --git a/drivers/gpu/drm/i915/i915_oa_sklgt4.h b/drivers/gpu/drm/i915/i915_oa_sklgt4.h
deleted file mode 100644
index 8dcf849d131e..000000000000
--- a/drivers/gpu/drm/i915/i915_oa_sklgt4.h
+++ /dev/null
@@ -1,15 +0,0 @@
-/*
- * SPDX-License-Identifier: MIT
- *
- * Copyright © 2018 Intel Corporation
- *
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- */
-
-#ifndef __I915_OA_SKLGT4_H__
-#define __I915_OA_SKLGT4_H__
-
-extern void i915_perf_load_test_config_sklgt4(struct drm_i915_private *dev_priv);
-
-#endif
diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c
index 5b07766a1c26..296452f9efe4 100644
--- a/drivers/gpu/drm/i915/i915_params.c
+++ b/drivers/gpu/drm/i915/i915_params.c
@@ -169,8 +169,9 @@ i915_param_named_unsafe(inject_load_failure, uint, 0400,
"Force an error after a number of failure check points (0:disabled (default), N:force failure at the Nth failure check point)");
#endif
-i915_param_named(enable_dpcd_backlight, bool, 0600,
- "Enable support for DPCD backlight control (default:false)");
+i915_param_named(enable_dpcd_backlight, int, 0600,
+ "Enable support for DPCD backlight control"
+ "(-1=use per-VBT LFP backlight type setting, 0=disabled [default], 1=enabled)");
#if IS_ENABLED(CONFIG_DRM_I915_GVT)
i915_param_named(enable_gvt, bool, 0400,
diff --git a/drivers/gpu/drm/i915/i915_params.h b/drivers/gpu/drm/i915/i915_params.h
index a4770ce46bd2..d29ade3b7de6 100644
--- a/drivers/gpu/drm/i915/i915_params.h
+++ b/drivers/gpu/drm/i915/i915_params.h
@@ -64,6 +64,7 @@ struct drm_printer;
param(int, reset, 2) \
param(unsigned int, inject_load_failure, 0) \
param(int, fastboot, -1) \
+ param(int, enable_dpcd_backlight, 0) \
param(char *, force_probe, CONFIG_DRM_I915_FORCE_PROBE) \
/* leave bools at the end to not create holes */ \
param(bool, alpha_support, IS_ENABLED(CONFIG_DRM_I915_ALPHA_SUPPORT)) \
@@ -76,7 +77,6 @@ struct drm_printer;
param(bool, verbose_state_checks, true) \
param(bool, nuclear_pageflip, false) \
param(bool, enable_dp_mst, true) \
- param(bool, enable_dpcd_backlight, false) \
param(bool, enable_gvt, false)
#define MEMBER(T, member, ...) T member;
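i915_params.h drives both the struct definition and its defaults from one X-macro list: each param(...) entry expands once through MEMBER to declare a field and once through an initializer macro to set its default. A minimal sketch of the pattern with made-up parameters (the real list is I915_PARAMS_FOR_EACH):

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical parameter list in the same shape as the driver's. */
#define DEMO_PARAMS(param) \
	param(int, reset, 2) \
	param(bool, enable_foo, false)

/* First expansion: declare one struct member per entry. */
#define MEMBER(T, member, value) T member;
struct demo_params { DEMO_PARAMS(MEMBER) };
#undef MEMBER

/* Second expansion: build the default initializer from the same list. */
#define DEFAULT(T, member, value) .member = (value),
static const struct demo_params demo_params = { DEMO_PARAMS(DEFAULT) };
#undef DEFAULT

int main(void)
{
	printf("reset=%d enable_foo=%d\n",
	       demo_params.reset, demo_params.enable_foo);
	return 0;
}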
diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c
index 6c9f46fc3e12..1974e4c78a43 100644
--- a/drivers/gpu/drm/i915/i915_pci.c
+++ b/drivers/gpu/drm/i915/i915_pci.c
@@ -522,8 +522,6 @@ static const struct intel_device_info intel_haswell_gt3_info = {
#define GEN8_FEATURES \
G75_FEATURES, \
GEN(8), \
- .page_sizes = I915_GTT_PAGE_SIZE_4K | \
- I915_GTT_PAGE_SIZE_2M, \
.has_logical_ring_contexts = 1, \
.ppgtt_type = INTEL_PPGTT_FULL, \
.ppgtt_size = 48, \
@@ -586,8 +584,7 @@ static const struct intel_device_info intel_cherryview_info = {
#define GEN9_DEFAULT_PAGE_SIZES \
.page_sizes = I915_GTT_PAGE_SIZE_4K | \
- I915_GTT_PAGE_SIZE_64K | \
- I915_GTT_PAGE_SIZE_2M
+ I915_GTT_PAGE_SIZE_64K
#define GEN9_FEATURES \
GEN8_FEATURES, \
@@ -595,7 +592,7 @@ static const struct intel_device_info intel_cherryview_info = {
GEN9_DEFAULT_PAGE_SIZES, \
.has_logical_ring_preemption = 1, \
.display.has_csr = 1, \
- .has_guc = 1, \
+ .has_gt_uc = 1, \
.display.has_ipc = 1, \
.ddb_size = 896
@@ -647,7 +644,7 @@ static const struct intel_device_info intel_skylake_gt4_info = {
.display.has_dp_mst = 1, \
.has_logical_ring_contexts = 1, \
.has_logical_ring_preemption = 1, \
- .has_guc = 1, \
+ .has_gt_uc = 1, \
.ppgtt_type = INTEL_PPGTT_FULL, \
.ppgtt_size = 48, \
.has_reset_engine = 1, \
@@ -727,8 +724,14 @@ static const struct intel_device_info intel_cannonlake_info = {
.gt = 2,
};
+#define GEN11_DEFAULT_PAGE_SIZES \
+ .page_sizes = I915_GTT_PAGE_SIZE_4K | \
+ I915_GTT_PAGE_SIZE_64K | \
+ I915_GTT_PAGE_SIZE_2M
+
#define GEN11_FEATURES \
GEN10_FEATURES, \
+ GEN11_DEFAULT_PAGE_SIZES, \
.pipe_offsets = { \
[TRANSCODER_A] = PIPE_A_OFFSET, \
[TRANSCODER_B] = PIPE_B_OFFSET, \
@@ -761,10 +764,41 @@ static const struct intel_device_info intel_elkhartlake_info = {
GEN11_FEATURES,
PLATFORM(INTEL_ELKHARTLAKE),
.require_force_probe = 1,
- .engine_mask = BIT(RCS0) | BIT(BCS0) | BIT(VCS0),
+ .engine_mask = BIT(RCS0) | BIT(BCS0) | BIT(VCS0) | BIT(VECS0),
.ppgtt_size = 36,
};
+#define GEN12_FEATURES \
+ GEN11_FEATURES, \
+ GEN(12), \
+ .pipe_offsets = { \
+ [TRANSCODER_A] = PIPE_A_OFFSET, \
+ [TRANSCODER_B] = PIPE_B_OFFSET, \
+ [TRANSCODER_C] = PIPE_C_OFFSET, \
+ [TRANSCODER_D] = PIPE_D_OFFSET, \
+ [TRANSCODER_DSI_0] = PIPE_DSI0_OFFSET, \
+ [TRANSCODER_DSI_1] = PIPE_DSI1_OFFSET, \
+ }, \
+ .trans_offsets = { \
+ [TRANSCODER_A] = TRANSCODER_A_OFFSET, \
+ [TRANSCODER_B] = TRANSCODER_B_OFFSET, \
+ [TRANSCODER_C] = TRANSCODER_C_OFFSET, \
+ [TRANSCODER_D] = TRANSCODER_D_OFFSET, \
+ [TRANSCODER_DSI_0] = TRANSCODER_DSI0_OFFSET, \
+ [TRANSCODER_DSI_1] = TRANSCODER_DSI1_OFFSET, \
+ }, \
+ .has_global_mocs = 1
+
+static const struct intel_device_info intel_tigerlake_12_info = {
+ GEN12_FEATURES,
+ PLATFORM(INTEL_TIGERLAKE),
+ .num_pipes = 4,
+ .require_force_probe = 1,
+ .display.has_modular_fia = 1,
+ .engine_mask =
+ BIT(RCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS0) | BIT(VCS2),
+};
+
#undef GEN
#undef PLATFORM
@@ -836,22 +870,23 @@ static const struct pci_device_id pciidlist[] = {
INTEL_CNL_IDS(&intel_cannonlake_info),
INTEL_ICL_11_IDS(&intel_icelake_11_info),
INTEL_EHL_IDS(&intel_elkhartlake_info),
+ INTEL_TGL_12_IDS(&intel_tigerlake_12_info),
{0, 0, 0}
};
MODULE_DEVICE_TABLE(pci, pciidlist);
static void i915_pci_remove(struct pci_dev *pdev)
{
- struct drm_device *dev;
+ struct drm_i915_private *i915;
- dev = pci_get_drvdata(pdev);
- if (!dev) /* driver load aborted, nothing to cleanup */
+ i915 = pci_get_drvdata(pdev);
+ if (!i915) /* driver load aborted, nothing to cleanup */
return;
- i915_driver_unload(dev);
- drm_dev_put(dev);
-
+ i915_driver_remove(i915);
pci_set_drvdata(pdev, NULL);
+
+ drm_dev_put(&i915->drm);
}
/* is device_id present in comma separated list of ids */
@@ -923,11 +958,11 @@ static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (vga_switcheroo_client_probe_defer(pdev))
return -EPROBE_DEFER;
- err = i915_driver_load(pdev, ent);
+ err = i915_driver_probe(pdev, ent);
if (err)
return err;
- if (i915_inject_load_failure()) {
+ if (i915_inject_probe_failure(pci_get_drvdata(pdev))) {
i915_pci_remove(pdev);
return -ENODEV;
}
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index 5140017f9a39..e42b86827d6b 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -200,20 +200,21 @@
#include "gt/intel_lrc_reg.h"
#include "i915_drv.h"
-#include "i915_oa_hsw.h"
-#include "i915_oa_bdw.h"
-#include "i915_oa_chv.h"
-#include "i915_oa_sklgt2.h"
-#include "i915_oa_sklgt3.h"
-#include "i915_oa_sklgt4.h"
-#include "i915_oa_bxt.h"
-#include "i915_oa_kblgt2.h"
-#include "i915_oa_kblgt3.h"
-#include "i915_oa_glk.h"
-#include "i915_oa_cflgt2.h"
-#include "i915_oa_cflgt3.h"
-#include "i915_oa_cnl.h"
-#include "i915_oa_icl.h"
+#include "i915_perf.h"
+#include "oa/i915_oa_hsw.h"
+#include "oa/i915_oa_bdw.h"
+#include "oa/i915_oa_chv.h"
+#include "oa/i915_oa_sklgt2.h"
+#include "oa/i915_oa_sklgt3.h"
+#include "oa/i915_oa_sklgt4.h"
+#include "oa/i915_oa_bxt.h"
+#include "oa/i915_oa_kblgt2.h"
+#include "oa/i915_oa_kblgt3.h"
+#include "oa/i915_oa_glk.h"
+#include "oa/i915_oa_cflgt2.h"
+#include "oa/i915_oa_cflgt3.h"
+#include "oa/i915_oa_cnl.h"
+#include "oa/i915_oa_icl.h"
/* HW requires this to be a power of two, between 128k and 16M, though driver
* is currently generally designed assuming the largest 16M size is used such
@@ -364,6 +365,8 @@ struct perf_open_properties {
int oa_period_exponent;
};
+static enum hrtimer_restart oa_poll_check_timer_cb(struct hrtimer *hrtimer);
+
static void free_oa_config(struct drm_i915_private *dev_priv,
struct i915_oa_config *oa_config)
{
@@ -392,8 +395,8 @@ static int get_oa_config(struct drm_i915_private *dev_priv,
int ret;
if (metrics_set == 1) {
- *out_config = &dev_priv->perf.oa.test_config;
- atomic_inc(&dev_priv->perf.oa.test_config.ref_count);
+ *out_config = &dev_priv->perf.test_config;
+ atomic_inc(&dev_priv->perf.test_config.ref_count);
return 0;
}
@@ -412,13 +415,16 @@ static int get_oa_config(struct drm_i915_private *dev_priv,
return ret;
}
-static u32 gen8_oa_hw_tail_read(struct drm_i915_private *dev_priv)
+static u32 gen8_oa_hw_tail_read(struct i915_perf_stream *stream)
{
+ struct drm_i915_private *dev_priv = stream->dev_priv;
+
return I915_READ(GEN8_OATAILPTR) & GEN8_OATAILPTR_MASK;
}
-static u32 gen7_oa_hw_tail_read(struct drm_i915_private *dev_priv)
+static u32 gen7_oa_hw_tail_read(struct i915_perf_stream *stream)
{
+ struct drm_i915_private *dev_priv = stream->dev_priv;
u32 oastatus1 = I915_READ(GEN7_OASTATUS1);
return oastatus1 & GEN7_OASTATUS1_TAIL_MASK;
@@ -426,7 +432,7 @@ static u32 gen7_oa_hw_tail_read(struct drm_i915_private *dev_priv)
/**
* oa_buffer_check_unlocked - check for data and update tail ptr state
- * @dev_priv: i915 device instance
+ * @stream: i915 stream instance
*
* This is either called via fops (for blocking reads in user ctx) or the poll
* check hrtimer (atomic ctx) to check the OA buffer tail pointer and check
@@ -448,9 +454,10 @@ static u32 gen7_oa_hw_tail_read(struct drm_i915_private *dev_priv)
*
* Returns: %true if the OA buffer contains data, else %false
*/
-static bool oa_buffer_check_unlocked(struct drm_i915_private *dev_priv)
+static bool oa_buffer_check_unlocked(struct i915_perf_stream *stream)
{
- int report_size = dev_priv->perf.oa.oa_buffer.format_size;
+ struct drm_i915_private *dev_priv = stream->dev_priv;
+ int report_size = stream->oa_buffer.format_size;
unsigned long flags;
unsigned int aged_idx;
u32 head, hw_tail, aged_tail, aging_tail;
@@ -460,19 +467,19 @@ static bool oa_buffer_check_unlocked(struct drm_i915_private *dev_priv)
* could result in an OA buffer reset which might reset the head,
* tails[] and aged_tail state.
*/
- spin_lock_irqsave(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);
+ spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);
/* NB: The head we observe here might effectively be a little out of
* date (between head and tails[aged_idx].offset if there is currently
* a read() in progress.
*/
- head = dev_priv->perf.oa.oa_buffer.head;
+ head = stream->oa_buffer.head;
- aged_idx = dev_priv->perf.oa.oa_buffer.aged_tail_idx;
- aged_tail = dev_priv->perf.oa.oa_buffer.tails[aged_idx].offset;
- aging_tail = dev_priv->perf.oa.oa_buffer.tails[!aged_idx].offset;
+ aged_idx = stream->oa_buffer.aged_tail_idx;
+ aged_tail = stream->oa_buffer.tails[aged_idx].offset;
+ aging_tail = stream->oa_buffer.tails[!aged_idx].offset;
- hw_tail = dev_priv->perf.oa.ops.oa_hw_tail_read(dev_priv);
+ hw_tail = dev_priv->perf.ops.oa_hw_tail_read(stream);
/* The tail pointer increases in 64 byte increments,
* not in report_size steps...
@@ -492,16 +499,16 @@ static bool oa_buffer_check_unlocked(struct drm_i915_private *dev_priv)
* available) without needing to wait for a later hrtimer callback.
*/
if (aging_tail != INVALID_TAIL_PTR &&
- ((now - dev_priv->perf.oa.oa_buffer.aging_timestamp) >
+ ((now - stream->oa_buffer.aging_timestamp) >
OA_TAIL_MARGIN_NSEC)) {
aged_idx ^= 1;
- dev_priv->perf.oa.oa_buffer.aged_tail_idx = aged_idx;
+ stream->oa_buffer.aged_tail_idx = aged_idx;
aged_tail = aging_tail;
/* Mark that we need a new pointer to start aging... */
- dev_priv->perf.oa.oa_buffer.tails[!aged_idx].offset = INVALID_TAIL_PTR;
+ stream->oa_buffer.tails[!aged_idx].offset = INVALID_TAIL_PTR;
aging_tail = INVALID_TAIL_PTR;
}
@@ -516,7 +523,7 @@ static bool oa_buffer_check_unlocked(struct drm_i915_private *dev_priv)
if (aging_tail == INVALID_TAIL_PTR &&
(aged_tail == INVALID_TAIL_PTR ||
OA_TAKEN(hw_tail, aged_tail) >= report_size)) {
- struct i915_vma *vma = dev_priv->perf.oa.oa_buffer.vma;
+ struct i915_vma *vma = stream->oa_buffer.vma;
u32 gtt_offset = i915_ggtt_offset(vma);
/* Be paranoid and do a bounds check on the pointer read back
@@ -525,16 +532,16 @@ static bool oa_buffer_check_unlocked(struct drm_i915_private *dev_priv)
*/
if (hw_tail >= gtt_offset &&
hw_tail < (gtt_offset + OA_BUFFER_SIZE)) {
- dev_priv->perf.oa.oa_buffer.tails[!aged_idx].offset =
+ stream->oa_buffer.tails[!aged_idx].offset =
aging_tail = hw_tail;
- dev_priv->perf.oa.oa_buffer.aging_timestamp = now;
+ stream->oa_buffer.aging_timestamp = now;
} else {
DRM_ERROR("Ignoring spurious out of range OA buffer tail pointer = %u\n",
hw_tail);
}
}
- spin_unlock_irqrestore(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);
+ spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);
return aged_tail == INVALID_TAIL_PTR ?
false : OA_TAKEN(aged_tail, head) >= report_size;
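OA_TAKEN (defined earlier in i915_perf.c) measures how many bytes sit between two offsets in the circular OA buffer; because the buffer size is a power of two, the wraparound reduces to a single mask. A minimal model of the macro:

#include <stdio.h>

#define OA_BUFFER_SIZE (16 * 1024 * 1024) /* power of two */
/* Modelled on the driver's macro: distance from head to tail, mod size. */
#define OA_TAKEN(tail, head) (((tail) - (head)) & (OA_BUFFER_SIZE - 1))

int main(void)
{
	/* Works across the wrap: tail restarted near the buffer start. */
	printf("%u\n", OA_TAKEN(0x100u, 0xFFFF00u)); /* 512 */
	printf("%u\n", OA_TAKEN(0x300u, 0x100u));    /* 512 */
	return 0;
}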
@@ -597,8 +604,7 @@ static int append_oa_sample(struct i915_perf_stream *stream,
size_t *offset,
const u8 *report)
{
- struct drm_i915_private *dev_priv = stream->dev_priv;
- int report_size = dev_priv->perf.oa.oa_buffer.format_size;
+ int report_size = stream->oa_buffer.format_size;
struct drm_i915_perf_record_header header;
u32 sample_flags = stream->sample_flags;
@@ -650,9 +656,9 @@ static int gen8_append_oa_reports(struct i915_perf_stream *stream,
size_t *offset)
{
struct drm_i915_private *dev_priv = stream->dev_priv;
- int report_size = dev_priv->perf.oa.oa_buffer.format_size;
- u8 *oa_buf_base = dev_priv->perf.oa.oa_buffer.vaddr;
- u32 gtt_offset = i915_ggtt_offset(dev_priv->perf.oa.oa_buffer.vma);
+ int report_size = stream->oa_buffer.format_size;
+ u8 *oa_buf_base = stream->oa_buffer.vaddr;
+ u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma);
u32 mask = (OA_BUFFER_SIZE - 1);
size_t start_offset = *offset;
unsigned long flags;
@@ -664,13 +670,13 @@ static int gen8_append_oa_reports(struct i915_perf_stream *stream,
if (WARN_ON(!stream->enabled))
return -EIO;
- spin_lock_irqsave(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);
+ spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);
- head = dev_priv->perf.oa.oa_buffer.head;
- aged_tail_idx = dev_priv->perf.oa.oa_buffer.aged_tail_idx;
- tail = dev_priv->perf.oa.oa_buffer.tails[aged_tail_idx].offset;
+ head = stream->oa_buffer.head;
+ aged_tail_idx = stream->oa_buffer.aged_tail_idx;
+ tail = stream->oa_buffer.tails[aged_tail_idx].offset;
- spin_unlock_irqrestore(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);
+ spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);
/*
* An invalid tail pointer here means we're still waiting for the poll
@@ -734,12 +740,12 @@ static int gen8_append_oa_reports(struct i915_perf_stream *stream,
reason = ((report32[0] >> OAREPORT_REASON_SHIFT) &
OAREPORT_REASON_MASK);
if (reason == 0) {
- if (__ratelimit(&dev_priv->perf.oa.spurious_report_rs))
+ if (__ratelimit(&dev_priv->perf.spurious_report_rs))
DRM_NOTE("Skipping spurious, invalid OA report\n");
continue;
}
- ctx_id = report32[2] & dev_priv->perf.oa.specific_ctx_id_mask;
+ ctx_id = report32[2] & stream->specific_ctx_id_mask;
/*
* Squash whatever is in the CTX_ID field if it's marked as
@@ -749,7 +755,7 @@ static int gen8_append_oa_reports(struct i915_perf_stream *stream,
* Note: that we don't clear the valid_ctx_bit so userspace can
* understand that the ID has been squashed by the kernel.
*/
- if (!(report32[0] & dev_priv->perf.oa.gen8_valid_ctx_bit))
+ if (!(report32[0] & dev_priv->perf.gen8_valid_ctx_bit))
ctx_id = report32[2] = INVALID_CTX_ID;
/*
@@ -783,18 +789,17 @@ static int gen8_append_oa_reports(struct i915_perf_stream *stream,
* switches since it's not-uncommon for periodic samples to
* identify a switch before any 'context switch' report.
*/
- if (!dev_priv->perf.oa.exclusive_stream->ctx ||
- dev_priv->perf.oa.specific_ctx_id == ctx_id ||
- (dev_priv->perf.oa.oa_buffer.last_ctx_id ==
- dev_priv->perf.oa.specific_ctx_id) ||
+ if (!dev_priv->perf.exclusive_stream->ctx ||
+ stream->specific_ctx_id == ctx_id ||
+ stream->oa_buffer.last_ctx_id == stream->specific_ctx_id ||
reason & OAREPORT_REASON_CTX_SWITCH) {
/*
* While filtering for a single context we avoid
* leaking the IDs of other contexts.
*/
- if (dev_priv->perf.oa.exclusive_stream->ctx &&
- dev_priv->perf.oa.specific_ctx_id != ctx_id) {
+ if (dev_priv->perf.exclusive_stream->ctx &&
+ stream->specific_ctx_id != ctx_id) {
report32[2] = INVALID_CTX_ID;
}
@@ -803,7 +808,7 @@ static int gen8_append_oa_reports(struct i915_perf_stream *stream,
if (ret)
break;
- dev_priv->perf.oa.oa_buffer.last_ctx_id = ctx_id;
+ stream->oa_buffer.last_ctx_id = ctx_id;
}
/*
@@ -817,7 +822,7 @@ static int gen8_append_oa_reports(struct i915_perf_stream *stream,
}
if (start_offset != *offset) {
- spin_lock_irqsave(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);
+ spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);
/*
* We removed the gtt_offset for the copy loop above, indexing
@@ -826,9 +831,9 @@ static int gen8_append_oa_reports(struct i915_perf_stream *stream,
head += gtt_offset;
I915_WRITE(GEN8_OAHEADPTR, head & GEN8_OAHEADPTR_MASK);
- dev_priv->perf.oa.oa_buffer.head = head;
+ stream->oa_buffer.head = head;
- spin_unlock_irqrestore(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);
+ spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);
}
return ret;
@@ -863,7 +868,7 @@ static int gen8_oa_read(struct i915_perf_stream *stream,
u32 oastatus;
int ret;
- if (WARN_ON(!dev_priv->perf.oa.oa_buffer.vaddr))
+ if (WARN_ON(!stream->oa_buffer.vaddr))
return -EIO;
oastatus = I915_READ(GEN8_OASTATUS);
@@ -889,10 +894,10 @@ static int gen8_oa_read(struct i915_perf_stream *stream,
return ret;
DRM_DEBUG("OA buffer overflow (exponent = %d): force restart\n",
- dev_priv->perf.oa.period_exponent);
+ stream->period_exponent);
- dev_priv->perf.oa.ops.oa_disable(stream);
- dev_priv->perf.oa.ops.oa_enable(stream);
+ dev_priv->perf.ops.oa_disable(stream);
+ dev_priv->perf.ops.oa_enable(stream);
/*
* Note: .oa_enable() is expected to re-init the oabuffer and
@@ -939,9 +944,9 @@ static int gen7_append_oa_reports(struct i915_perf_stream *stream,
size_t *offset)
{
struct drm_i915_private *dev_priv = stream->dev_priv;
- int report_size = dev_priv->perf.oa.oa_buffer.format_size;
- u8 *oa_buf_base = dev_priv->perf.oa.oa_buffer.vaddr;
- u32 gtt_offset = i915_ggtt_offset(dev_priv->perf.oa.oa_buffer.vma);
+ int report_size = stream->oa_buffer.format_size;
+ u8 *oa_buf_base = stream->oa_buffer.vaddr;
+ u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma);
u32 mask = (OA_BUFFER_SIZE - 1);
size_t start_offset = *offset;
unsigned long flags;
@@ -953,13 +958,13 @@ static int gen7_append_oa_reports(struct i915_perf_stream *stream,
if (WARN_ON(!stream->enabled))
return -EIO;
- spin_lock_irqsave(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);
+ spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);
- head = dev_priv->perf.oa.oa_buffer.head;
- aged_tail_idx = dev_priv->perf.oa.oa_buffer.aged_tail_idx;
- tail = dev_priv->perf.oa.oa_buffer.tails[aged_tail_idx].offset;
+ head = stream->oa_buffer.head;
+ aged_tail_idx = stream->oa_buffer.aged_tail_idx;
+ tail = stream->oa_buffer.tails[aged_tail_idx].offset;
- spin_unlock_irqrestore(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);
+ spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);
/* An invalid tail pointer here means we're still waiting for the poll
* hrtimer callback to give us a pointer
@@ -1012,7 +1017,7 @@ static int gen7_append_oa_reports(struct i915_perf_stream *stream,
* copying it to userspace...
*/
if (report32[0] == 0) {
- if (__ratelimit(&dev_priv->perf.oa.spurious_report_rs))
+ if (__ratelimit(&dev_priv->perf.spurious_report_rs))
DRM_NOTE("Skipping spurious, invalid OA report\n");
continue;
}
@@ -1031,7 +1036,7 @@ static int gen7_append_oa_reports(struct i915_perf_stream *stream,
}
if (start_offset != *offset) {
- spin_lock_irqsave(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);
+ spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);
/* We removed the gtt_offset for the copy loop above, indexing
* relative to oa_buf_base so put back here...
@@ -1041,9 +1046,9 @@ static int gen7_append_oa_reports(struct i915_perf_stream *stream,
I915_WRITE(GEN7_OASTATUS2,
((head & GEN7_OASTATUS2_HEAD_MASK) |
GEN7_OASTATUS2_MEM_SELECT_GGTT));
- dev_priv->perf.oa.oa_buffer.head = head;
+ stream->oa_buffer.head = head;
- spin_unlock_irqrestore(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);
+ spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);
}
return ret;
@@ -1074,7 +1079,7 @@ static int gen7_oa_read(struct i915_perf_stream *stream,
u32 oastatus1;
int ret;
- if (WARN_ON(!dev_priv->perf.oa.oa_buffer.vaddr))
+ if (WARN_ON(!stream->oa_buffer.vaddr))
return -EIO;
oastatus1 = I915_READ(GEN7_OASTATUS1);
@@ -1084,7 +1089,7 @@ static int gen7_oa_read(struct i915_perf_stream *stream,
* may be updated asynchronously) so we ignore status bits
* that have already been reported to userspace.
*/
- oastatus1 &= ~dev_priv->perf.oa.gen7_latched_oastatus1;
+ oastatus1 &= ~dev_priv->perf.gen7_latched_oastatus1;
/* We treat OABUFFER_OVERFLOW as a significant error:
*
@@ -1113,10 +1118,10 @@ static int gen7_oa_read(struct i915_perf_stream *stream,
return ret;
DRM_DEBUG("OA buffer overflow (exponent = %d): force restart\n",
- dev_priv->perf.oa.period_exponent);
+ stream->period_exponent);
- dev_priv->perf.oa.ops.oa_disable(stream);
- dev_priv->perf.oa.ops.oa_enable(stream);
+ dev_priv->perf.ops.oa_disable(stream);
+ dev_priv->perf.ops.oa_enable(stream);
oastatus1 = I915_READ(GEN7_OASTATUS1);
}
@@ -1126,7 +1131,7 @@ static int gen7_oa_read(struct i915_perf_stream *stream,
DRM_I915_PERF_RECORD_OA_REPORT_LOST);
if (ret)
return ret;
- dev_priv->perf.oa.gen7_latched_oastatus1 |=
+ dev_priv->perf.gen7_latched_oastatus1 |=
GEN7_OASTATUS1_REPORT_LOST;
}
@@ -1149,14 +1154,12 @@ static int gen7_oa_read(struct i915_perf_stream *stream,
*/
static int i915_oa_wait_unlocked(struct i915_perf_stream *stream)
{
- struct drm_i915_private *dev_priv = stream->dev_priv;
-
/* We would wait indefinitely if periodic sampling is not enabled */
- if (!dev_priv->perf.oa.periodic)
+ if (!stream->periodic)
return -EIO;
- return wait_event_interruptible(dev_priv->perf.oa.poll_wq,
- oa_buffer_check_unlocked(dev_priv));
+ return wait_event_interruptible(stream->poll_wq,
+ oa_buffer_check_unlocked(stream));
}
/**
@@ -1173,9 +1176,7 @@ static void i915_oa_poll_wait(struct i915_perf_stream *stream,
struct file *file,
poll_table *wait)
{
- struct drm_i915_private *dev_priv = stream->dev_priv;
-
- poll_wait(file, &dev_priv->perf.oa.poll_wq, wait);
+ poll_wait(file, &stream->poll_wq, wait);
}
/**
@@ -1197,13 +1198,14 @@ static int i915_oa_read(struct i915_perf_stream *stream,
{
struct drm_i915_private *dev_priv = stream->dev_priv;
- return dev_priv->perf.oa.ops.read(stream, buf, count, offset);
+ return dev_priv->perf.ops.read(stream, buf, count, offset);
}
-static struct intel_context *oa_pin_context(struct drm_i915_private *i915,
- struct i915_gem_context *ctx)
+static struct intel_context *oa_pin_context(struct i915_perf_stream *stream)
{
struct i915_gem_engines_iter it;
+ struct drm_i915_private *i915 = stream->dev_priv;
+ struct i915_gem_context *ctx = stream->ctx;
struct intel_context *ce;
int err;
@@ -1221,7 +1223,7 @@ static struct intel_context *oa_pin_context(struct drm_i915_private *i915,
*/
err = intel_context_pin(ce);
if (err == 0) {
- i915->perf.oa.pinned_ctx = ce;
+ stream->pinned_ctx = ce;
break;
}
}
@@ -1231,7 +1233,7 @@ static struct intel_context *oa_pin_context(struct drm_i915_private *i915,
if (err)
return ERR_PTR(err);
- return i915->perf.oa.pinned_ctx;
+ return stream->pinned_ctx;
}
/**
@@ -1249,7 +1251,7 @@ static int oa_get_render_ctx_id(struct i915_perf_stream *stream)
struct drm_i915_private *i915 = stream->dev_priv;
struct intel_context *ce;
- ce = oa_pin_context(i915, stream->ctx);
+ ce = oa_pin_context(stream);
if (IS_ERR(ce))
return PTR_ERR(ce);
@@ -1259,8 +1261,8 @@ static int oa_get_render_ctx_id(struct i915_perf_stream *stream)
* On Haswell we don't do any post processing of the reports
* and don't need to use the mask.
*/
- i915->perf.oa.specific_ctx_id = i915_ggtt_offset(ce->state);
- i915->perf.oa.specific_ctx_id_mask = 0;
+ stream->specific_ctx_id = i915_ggtt_offset(ce->state);
+ stream->specific_ctx_id_mask = 0;
break;
}
@@ -1278,33 +1280,33 @@ static int oa_get_render_ctx_id(struct i915_perf_stream *stream)
* dropped by GuC. They won't be part of the context
* ID in the OA reports, so squash those lower bits.
*/
- i915->perf.oa.specific_ctx_id =
+ stream->specific_ctx_id =
lower_32_bits(ce->lrc_desc) >> 12;
/*
* GuC uses the top bit to signal proxy submission, so
* ignore that bit.
*/
- i915->perf.oa.specific_ctx_id_mask =
+ stream->specific_ctx_id_mask =
(1U << (GEN8_CTX_ID_WIDTH - 1)) - 1;
} else {
- i915->perf.oa.specific_ctx_id_mask =
+ stream->specific_ctx_id_mask =
(1U << GEN8_CTX_ID_WIDTH) - 1;
- i915->perf.oa.specific_ctx_id =
+ stream->specific_ctx_id =
upper_32_bits(ce->lrc_desc);
- i915->perf.oa.specific_ctx_id &=
- i915->perf.oa.specific_ctx_id_mask;
+ stream->specific_ctx_id &=
+ stream->specific_ctx_id_mask;
}
break;
case 11: {
- i915->perf.oa.specific_ctx_id_mask =
+ stream->specific_ctx_id_mask =
((1U << GEN11_SW_CTX_ID_WIDTH) - 1) << (GEN11_SW_CTX_ID_SHIFT - 32) |
((1U << GEN11_ENGINE_INSTANCE_WIDTH) - 1) << (GEN11_ENGINE_INSTANCE_SHIFT - 32) |
((1 << GEN11_ENGINE_CLASS_WIDTH) - 1) << (GEN11_ENGINE_CLASS_SHIFT - 32);
- i915->perf.oa.specific_ctx_id = upper_32_bits(ce->lrc_desc);
- i915->perf.oa.specific_ctx_id &=
- i915->perf.oa.specific_ctx_id_mask;
+ stream->specific_ctx_id = upper_32_bits(ce->lrc_desc);
+ stream->specific_ctx_id &=
+ stream->specific_ctx_id_mask;
break;
}
@@ -1313,8 +1315,8 @@ static int oa_get_render_ctx_id(struct i915_perf_stream *stream)
}
DRM_DEBUG_DRIVER("filtering on ctx_id=0x%x ctx_id_mask=0x%x\n",
- i915->perf.oa.specific_ctx_id,
- i915->perf.oa.specific_ctx_id_mask);
+ stream->specific_ctx_id,
+ stream->specific_ctx_id_mask);
return 0;
}
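The specific_ctx_id_mask values above are built with the usual width/shift bitfield pattern: ((1 << width) - 1) << shift isolates one field of the context descriptor. A small model with hypothetical width and shift (the real ones come from the GEN11_* definitions in intel_lrc.h):

#include <stdint.h>
#include <stdio.h>

#define FIELD_WIDTH 11	/* illustrative, not the hardware layout */
#define FIELD_SHIFT 5
#define FIELD_MASK (((1u << FIELD_WIDTH) - 1) << FIELD_SHIFT)

int main(void)
{
	uint32_t desc = 0xdeadbeef;

	/* Mask selects the field; shifting down recovers its raw value. */
	printf("mask=%08x field=%x\n", FIELD_MASK,
	       (desc & FIELD_MASK) >> FIELD_SHIFT);
	return 0;
}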
@@ -1331,10 +1333,10 @@ static void oa_put_render_ctx_id(struct i915_perf_stream *stream)
struct drm_i915_private *dev_priv = stream->dev_priv;
struct intel_context *ce;
- dev_priv->perf.oa.specific_ctx_id = INVALID_CTX_ID;
- dev_priv->perf.oa.specific_ctx_id_mask = 0;
+ stream->specific_ctx_id = INVALID_CTX_ID;
+ stream->specific_ctx_id_mask = 0;
- ce = fetch_and_zero(&dev_priv->perf.oa.pinned_ctx);
+ ce = fetch_and_zero(&stream->pinned_ctx);
if (ce) {
mutex_lock(&dev_priv->drm.struct_mutex);
intel_context_unpin(ce);
@@ -1343,34 +1345,36 @@ static void oa_put_render_ctx_id(struct i915_perf_stream *stream)
}
static void
-free_oa_buffer(struct drm_i915_private *i915)
+free_oa_buffer(struct i915_perf_stream *stream)
{
+ struct drm_i915_private *i915 = stream->dev_priv;
+
mutex_lock(&i915->drm.struct_mutex);
- i915_vma_unpin_and_release(&i915->perf.oa.oa_buffer.vma,
+ i915_vma_unpin_and_release(&stream->oa_buffer.vma,
I915_VMA_RELEASE_MAP);
mutex_unlock(&i915->drm.struct_mutex);
- i915->perf.oa.oa_buffer.vaddr = NULL;
+ stream->oa_buffer.vaddr = NULL;
}
static void i915_oa_stream_destroy(struct i915_perf_stream *stream)
{
struct drm_i915_private *dev_priv = stream->dev_priv;
- BUG_ON(stream != dev_priv->perf.oa.exclusive_stream);
+ BUG_ON(stream != dev_priv->perf.exclusive_stream);
/*
* Unset exclusive_stream first, it will be checked while disabling
* the metric set on gen8+.
*/
mutex_lock(&dev_priv->drm.struct_mutex);
- dev_priv->perf.oa.exclusive_stream = NULL;
- dev_priv->perf.oa.ops.disable_metric_set(dev_priv);
+ dev_priv->perf.exclusive_stream = NULL;
+ dev_priv->perf.ops.disable_metric_set(stream);
mutex_unlock(&dev_priv->drm.struct_mutex);
- free_oa_buffer(dev_priv);
+ free_oa_buffer(stream);
intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
intel_runtime_pm_put(&dev_priv->runtime_pm, stream->wakeref);
@@ -1380,41 +1384,42 @@ static void i915_oa_stream_destroy(struct i915_perf_stream *stream)
put_oa_config(dev_priv, stream->oa_config);
- if (dev_priv->perf.oa.spurious_report_rs.missed) {
+ if (dev_priv->perf.spurious_report_rs.missed) {
DRM_NOTE("%d spurious OA report notices suppressed due to ratelimiting\n",
- dev_priv->perf.oa.spurious_report_rs.missed);
+ dev_priv->perf.spurious_report_rs.missed);
}
}
-static void gen7_init_oa_buffer(struct drm_i915_private *dev_priv)
+static void gen7_init_oa_buffer(struct i915_perf_stream *stream)
{
- u32 gtt_offset = i915_ggtt_offset(dev_priv->perf.oa.oa_buffer.vma);
+ struct drm_i915_private *dev_priv = stream->dev_priv;
+ u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma);
unsigned long flags;
- spin_lock_irqsave(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);
+ spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);
/* Pre-DevBDW: OABUFFER must be set with counters off,
* before OASTATUS1, but after OASTATUS2
*/
I915_WRITE(GEN7_OASTATUS2,
gtt_offset | GEN7_OASTATUS2_MEM_SELECT_GGTT); /* head */
- dev_priv->perf.oa.oa_buffer.head = gtt_offset;
+ stream->oa_buffer.head = gtt_offset;
I915_WRITE(GEN7_OABUFFER, gtt_offset);
I915_WRITE(GEN7_OASTATUS1, gtt_offset | OABUFFER_SIZE_16M); /* tail */
/* Mark that we need updated tail pointers to read from... */
- dev_priv->perf.oa.oa_buffer.tails[0].offset = INVALID_TAIL_PTR;
- dev_priv->perf.oa.oa_buffer.tails[1].offset = INVALID_TAIL_PTR;
+ stream->oa_buffer.tails[0].offset = INVALID_TAIL_PTR;
+ stream->oa_buffer.tails[1].offset = INVALID_TAIL_PTR;
- spin_unlock_irqrestore(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);
+ spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);
/* On Haswell we have to track which OASTATUS1 flags we've
* already seen since they can't be cleared while periodic
* sampling is enabled.
*/
- dev_priv->perf.oa.gen7_latched_oastatus1 = 0;
+ dev_priv->perf.gen7_latched_oastatus1 = 0;
/* NB: although the OA buffer will initially be allocated
* zeroed via shmfs (and so this memset is redundant when
@@ -1427,24 +1432,25 @@ static void gen7_init_oa_buffer(struct drm_i915_private *dev_priv)
* the assumption that new reports are being written to zeroed
* memory...
*/
- memset(dev_priv->perf.oa.oa_buffer.vaddr, 0, OA_BUFFER_SIZE);
+ memset(stream->oa_buffer.vaddr, 0, OA_BUFFER_SIZE);
/* Maybe make ->pollin per-stream state if we support multiple
* concurrent streams in the future.
*/
- dev_priv->perf.oa.pollin = false;
+ stream->pollin = false;
}
-static void gen8_init_oa_buffer(struct drm_i915_private *dev_priv)
+static void gen8_init_oa_buffer(struct i915_perf_stream *stream)
{
- u32 gtt_offset = i915_ggtt_offset(dev_priv->perf.oa.oa_buffer.vma);
+ struct drm_i915_private *dev_priv = stream->dev_priv;
+ u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma);
unsigned long flags;
- spin_lock_irqsave(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);
+ spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);
I915_WRITE(GEN8_OASTATUS, 0);
I915_WRITE(GEN8_OAHEADPTR, gtt_offset);
- dev_priv->perf.oa.oa_buffer.head = gtt_offset;
+ stream->oa_buffer.head = gtt_offset;
I915_WRITE(GEN8_OABUFFER_UDW, 0);
@@ -1461,17 +1467,17 @@ static void gen8_init_oa_buffer(struct drm_i915_private *dev_priv)
I915_WRITE(GEN8_OATAILPTR, gtt_offset & GEN8_OATAILPTR_MASK);
/* Mark that we need updated tail pointers to read from... */
- dev_priv->perf.oa.oa_buffer.tails[0].offset = INVALID_TAIL_PTR;
- dev_priv->perf.oa.oa_buffer.tails[1].offset = INVALID_TAIL_PTR;
+ stream->oa_buffer.tails[0].offset = INVALID_TAIL_PTR;
+ stream->oa_buffer.tails[1].offset = INVALID_TAIL_PTR;
/*
* Reset state used to recognise context switches, affecting which
* reports we will forward to userspace while filtering for a single
* context.
*/
- dev_priv->perf.oa.oa_buffer.last_ctx_id = INVALID_CTX_ID;
+ stream->oa_buffer.last_ctx_id = INVALID_CTX_ID;
- spin_unlock_irqrestore(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);
+ spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);
/*
* NB: although the OA buffer will initially be allocated
@@ -1485,22 +1491,23 @@ static void gen8_init_oa_buffer(struct drm_i915_private *dev_priv)
* the assumption that new reports are being written to zeroed
* memory...
*/
- memset(dev_priv->perf.oa.oa_buffer.vaddr, 0, OA_BUFFER_SIZE);
+ memset(stream->oa_buffer.vaddr, 0, OA_BUFFER_SIZE);
/*
* Maybe make ->pollin per-stream state if we support multiple
* concurrent streams in the future.
*/
- dev_priv->perf.oa.pollin = false;
+ stream->pollin = false;
}
-static int alloc_oa_buffer(struct drm_i915_private *dev_priv)
+static int alloc_oa_buffer(struct i915_perf_stream *stream)
{
struct drm_i915_gem_object *bo;
+ struct drm_i915_private *dev_priv = stream->dev_priv;
struct i915_vma *vma;
int ret;
- if (WARN_ON(dev_priv->perf.oa.oa_buffer.vma))
+ if (WARN_ON(stream->oa_buffer.vma))
return -ENODEV;
ret = i915_mutex_lock_interruptible(&dev_priv->drm);
@@ -1525,18 +1532,18 @@ static int alloc_oa_buffer(struct drm_i915_private *dev_priv)
ret = PTR_ERR(vma);
goto err_unref;
}
- dev_priv->perf.oa.oa_buffer.vma = vma;
+ stream->oa_buffer.vma = vma;
- dev_priv->perf.oa.oa_buffer.vaddr =
+ stream->oa_buffer.vaddr =
i915_gem_object_pin_map(bo, I915_MAP_WB);
- if (IS_ERR(dev_priv->perf.oa.oa_buffer.vaddr)) {
- ret = PTR_ERR(dev_priv->perf.oa.oa_buffer.vaddr);
+ if (IS_ERR(stream->oa_buffer.vaddr)) {
+ ret = PTR_ERR(stream->oa_buffer.vaddr);
goto err_unpin;
}
DRM_DEBUG_DRIVER("OA Buffer initialized, gtt offset = 0x%x, vaddr = %p\n",
- i915_ggtt_offset(dev_priv->perf.oa.oa_buffer.vma),
- dev_priv->perf.oa.oa_buffer.vaddr);
+ i915_ggtt_offset(stream->oa_buffer.vma),
+ stream->oa_buffer.vaddr);
goto unlock;
@@ -1546,8 +1553,8 @@ err_unpin:
err_unref:
i915_gem_object_put(bo);
- dev_priv->perf.oa.oa_buffer.vaddr = NULL;
- dev_priv->perf.oa.oa_buffer.vma = NULL;
+ stream->oa_buffer.vaddr = NULL;
+ stream->oa_buffer.vma = NULL;
unlock:
mutex_unlock(&dev_priv->drm.struct_mutex);
@@ -1623,8 +1630,10 @@ static int hsw_enable_metric_set(struct i915_perf_stream *stream)
return 0;
}
-static void hsw_disable_metric_set(struct drm_i915_private *dev_priv)
+static void hsw_disable_metric_set(struct i915_perf_stream *stream)
{
+ struct drm_i915_private *dev_priv = stream->dev_priv;
+
I915_WRITE(GEN6_UCGCTL1, (I915_READ(GEN6_UCGCTL1) &
~GEN6_CSUNIT_CLOCK_GATE_DISABLE));
I915_WRITE(GEN7_MISCCPCTL, (I915_READ(GEN7_MISCCPCTL) |
@@ -1634,6 +1643,27 @@ static void hsw_disable_metric_set(struct drm_i915_private *dev_priv)
~GT_NOA_ENABLE));
}
+static u32 oa_config_flex_reg(const struct i915_oa_config *oa_config,
+ i915_reg_t reg)
+{
+ u32 mmio = i915_mmio_reg_offset(reg);
+ int i;
+
+ /*
+ * This arbitrary default will select the 'EU FPU0 Pipeline
+ * Active' event. In the future it's anticipated that there
+ * will be an explicit 'No Event' we can select, but not yet...
+ */
+ if (!oa_config)
+ return 0;
+
+ for (i = 0; i < oa_config->flex_regs_len; i++) {
+ if (i915_mmio_reg_offset(oa_config->flex_regs[i].addr) == mmio)
+ return oa_config->flex_regs[i].value;
+ }
+
+ return 0;
+}
/*
* NB: It must always remain pointer safe to run this even if the OA unit
* has been disabled.
@@ -1642,13 +1672,14 @@ static void hsw_disable_metric_set(struct drm_i915_private *dev_priv)
* in the case that the OA unit has been disabled.
*/
static void
-gen8_update_reg_state_unlocked(struct intel_context *ce,
+gen8_update_reg_state_unlocked(struct i915_perf_stream *stream,
+ struct intel_context *ce,
u32 *reg_state,
const struct i915_oa_config *oa_config)
{
- struct drm_i915_private *i915 = ce->gem_context->i915;
- u32 ctx_oactxctrl = i915->perf.oa.ctx_oactxctrl_offset;
- u32 ctx_flexeu0 = i915->perf.oa.ctx_flexeu0_offset;
+ struct drm_i915_private *i915 = ce->engine->i915;
+ u32 ctx_oactxctrl = i915->perf.ctx_oactxctrl_offset;
+ u32 ctx_flexeu0 = i915->perf.ctx_flexeu0_offset;
/* The MMIO offsets for Flex EU registers aren't contiguous */
i915_reg_t flex_regs[] = {
EU_PERF_CNTL0,
@@ -1662,38 +1693,143 @@ gen8_update_reg_state_unlocked(struct intel_context *ce,
int i;
CTX_REG(reg_state, ctx_oactxctrl, GEN8_OACTXCONTROL,
- (i915->perf.oa.period_exponent << GEN8_OA_TIMER_PERIOD_SHIFT) |
- (i915->perf.oa.periodic ? GEN8_OA_TIMER_ENABLE : 0) |
+ (stream->period_exponent << GEN8_OA_TIMER_PERIOD_SHIFT) |
+ (stream->periodic ? GEN8_OA_TIMER_ENABLE : 0) |
GEN8_OA_COUNTER_RESUME);
for (i = 0; i < ARRAY_SIZE(flex_regs); i++) {
- u32 state_offset = ctx_flexeu0 + i * 2;
- u32 mmio = i915_mmio_reg_offset(flex_regs[i]);
+ CTX_REG(reg_state, ctx_flexeu0 + i * 2, flex_regs[i],
+ oa_config_flex_reg(oa_config, flex_regs[i]));
+ }
- /*
- * This arbitrary default will select the 'EU FPU0 Pipeline
- * Active' event. In the future it's anticipated that there
- * will be an explicit 'No Event' we can select, but not yet...
- */
- u32 value = 0;
+ CTX_REG(reg_state,
+ CTX_R_PWR_CLK_STATE, GEN8_R_PWR_CLK_STATE,
+ intel_sseu_make_rpcs(i915, &ce->sseu));
+}
- if (oa_config) {
- u32 j;
+struct flex {
+ i915_reg_t reg;
+ u32 offset;
+ u32 value;
+};
- for (j = 0; j < oa_config->flex_regs_len; j++) {
- if (i915_mmio_reg_offset(oa_config->flex_regs[j].addr) == mmio) {
- value = oa_config->flex_regs[j].value;
- break;
- }
- }
- }
+static int
+gen8_store_flex(struct i915_request *rq,
+ struct intel_context *ce,
+ const struct flex *flex, unsigned int count)
+{
+ u32 offset;
+ u32 *cs;
+
+ cs = intel_ring_begin(rq, 4 * count);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ offset = i915_ggtt_offset(ce->state) + LRC_STATE_PN * PAGE_SIZE;
+ do {
+ *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
+ *cs++ = offset + (flex->offset + 1) * sizeof(u32);
+ *cs++ = 0;
+ *cs++ = flex->value;
+ } while (flex++, --count);
+
+ intel_ring_advance(rq, cs);
+
+ return 0;
+}
+
+static int
+gen8_load_flex(struct i915_request *rq,
+ struct intel_context *ce,
+ const struct flex *flex, unsigned int count)
+{
+ u32 *cs;
+
+ GEM_BUG_ON(!count || count > 63);
+
+ cs = intel_ring_begin(rq, 2 * count + 2);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ *cs++ = MI_LOAD_REGISTER_IMM(count);
+ do {
+ *cs++ = i915_mmio_reg_offset(flex->reg);
+ *cs++ = flex->value;
+ } while (flex++, --count);
+ *cs++ = MI_NOOP;
+
+ intel_ring_advance(rq, cs);
+
+ return 0;
+}
+
+static int gen8_modify_context(struct intel_context *ce,
+ const struct flex *flex, unsigned int count)
+{
+ struct i915_request *rq;
+ int err;
+
+ lockdep_assert_held(&ce->pin_mutex);
+
+ rq = i915_request_create(ce->engine->kernel_context);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
+
+ /* Serialise with the remote context */
+ err = intel_context_prepare_remote_request(ce, rq);
+ if (err == 0)
+ err = gen8_store_flex(rq, ce, flex, count);
+
+ i915_request_add(rq);
+ return err;
+}
+
+static int gen8_modify_self(struct intel_context *ce,
+ const struct flex *flex, unsigned int count)
+{
+ struct i915_request *rq;
+ int err;
+
+ rq = i915_request_create(ce);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
+
+ err = gen8_load_flex(rq, ce, flex, count);
+
+ i915_request_add(rq);
+ return err;
+}
+
+static int gen8_configure_context(struct i915_gem_context *ctx,
+ struct flex *flex, unsigned int count)
+{
+ struct i915_gem_engines_iter it;
+ struct intel_context *ce;
+ int err = 0;
+
+ for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
+ GEM_BUG_ON(ce == ce->engine->kernel_context);
+
+ if (ce->engine->class != RENDER_CLASS)
+ continue;
- CTX_REG(reg_state, state_offset, flex_regs[i], value);
+ err = intel_context_lock_pinned(ce);
+ if (err)
+ break;
+
+ flex->value = intel_sseu_make_rpcs(ctx->i915, &ce->sseu);
+
+ /* Otherwise OA settings will be set upon first use */
+ if (intel_context_is_pinned(ce))
+ err = gen8_modify_context(ce, flex, count);
+
+ intel_context_unlock_pinned(ce);
+ if (err)
+ break;
}
+ i915_gem_context_unlock_engines(ctx);
- CTX_REG(reg_state,
- CTX_R_PWR_CLK_STATE, GEN8_R_PWR_CLK_STATE,
- intel_sseu_make_rpcs(i915, &ce->sseu));
+ return err;
}
/*
@@ -1720,15 +1856,42 @@ gen8_update_reg_state_unlocked(struct intel_context *ce,
*
* Note: it's only the RCS/Render context that has any OA state.
*/
-static int gen8_configure_all_contexts(struct drm_i915_private *dev_priv,
+static int gen8_configure_all_contexts(struct i915_perf_stream *stream,
const struct i915_oa_config *oa_config)
{
- unsigned int map_type = i915_coherent_map_type(dev_priv);
+ struct drm_i915_private *i915 = stream->dev_priv;
+ /* The MMIO offsets for Flex EU registers aren't contiguous */
+ const u32 ctx_flexeu0 = i915->perf.ctx_flexeu0_offset;
+#define ctx_flexeuN(N) (ctx_flexeu0 + 2 * (N))
+ struct flex regs[] = {
+ {
+ GEN8_R_PWR_CLK_STATE,
+ CTX_R_PWR_CLK_STATE,
+ },
+ {
+ GEN8_OACTXCONTROL,
+ i915->perf.ctx_oactxctrl_offset,
+ ((stream->period_exponent << GEN8_OA_TIMER_PERIOD_SHIFT) |
+ (stream->periodic ? GEN8_OA_TIMER_ENABLE : 0) |
+ GEN8_OA_COUNTER_RESUME)
+ },
+ { EU_PERF_CNTL0, ctx_flexeuN(0) },
+ { EU_PERF_CNTL1, ctx_flexeuN(1) },
+ { EU_PERF_CNTL2, ctx_flexeuN(2) },
+ { EU_PERF_CNTL3, ctx_flexeuN(3) },
+ { EU_PERF_CNTL4, ctx_flexeuN(4) },
+ { EU_PERF_CNTL5, ctx_flexeuN(5) },
+ { EU_PERF_CNTL6, ctx_flexeuN(6) },
+ };
+#undef ctx_flexeuN
+ struct intel_engine_cs *engine;
struct i915_gem_context *ctx;
- struct i915_request *rq;
- int ret;
+ int i;
- lockdep_assert_held(&dev_priv->drm.struct_mutex);
+ for (i = 2; i < ARRAY_SIZE(regs); i++)
+ regs[i].value = oa_config_flex_reg(oa_config, regs[i].reg);
+
+ lockdep_assert_held(&i915->drm.struct_mutex);
/*
* The OA register config is setup through the context image. This image
@@ -1740,58 +1903,41 @@ static int gen8_configure_all_contexts(struct drm_i915_private *dev_priv,
* this might leave small interval of time where the OA unit is
* configured at an invalid sampling period.
*
- * So far the best way to work around this issue seems to be draining
- * the GPU from any submitted work.
+ * Note that since we emit all requests from a single ring, there
+ * is still an implicit global barrier here that may cause a high
+ * priority context to wait for an otherwise independent low priority
+ * context. Contexts idle at the time of reconfiguration are not
+ * trapped behind the barrier.
*/
- ret = i915_gem_wait_for_idle(dev_priv,
- I915_WAIT_LOCKED,
- MAX_SCHEDULE_TIMEOUT);
- if (ret)
- return ret;
-
- /* Update all contexts now that we've stalled the submission. */
- list_for_each_entry(ctx, &dev_priv->contexts.list, link) {
- struct i915_gem_engines_iter it;
- struct intel_context *ce;
-
- for_each_gem_engine(ce,
- i915_gem_context_lock_engines(ctx),
- it) {
- u32 *regs;
-
- if (ce->engine->class != RENDER_CLASS)
- continue;
-
- /* OA settings will be set upon first use */
- if (!ce->state)
- continue;
-
- regs = i915_gem_object_pin_map(ce->state->obj,
- map_type);
- if (IS_ERR(regs)) {
- i915_gem_context_unlock_engines(ctx);
- return PTR_ERR(regs);
- }
-
- ce->state->obj->mm.dirty = true;
- regs += LRC_STATE_PN * PAGE_SIZE / sizeof(*regs);
+ list_for_each_entry(ctx, &i915->contexts.list, link) {
+ int err;
- gen8_update_reg_state_unlocked(ce, regs, oa_config);
+ if (ctx == i915->kernel_context)
+ continue;
- i915_gem_object_unpin_map(ce->state->obj);
- }
- i915_gem_context_unlock_engines(ctx);
+ err = gen8_configure_context(ctx, regs, ARRAY_SIZE(regs));
+ if (err)
+ return err;
}
/*
- * Apply the configuration by doing one context restore of the edited
- * context image.
+ * After updating all other contexts, we need to modify ourselves.
+ * If we don't modify the kernel_context, we do not get events while
+ * idle.
*/
- rq = i915_request_create(dev_priv->engine[RCS0]->kernel_context);
- if (IS_ERR(rq))
- return PTR_ERR(rq);
+ for_each_uabi_engine(engine, i915) {
+ struct intel_context *ce = engine->kernel_context;
+ int err;
- i915_request_add(rq);
+ if (engine->class != RENDER_CLASS)
+ continue;
+
+ regs[0].value = intel_sseu_make_rpcs(i915, &ce->sseu);
+
+ err = gen8_modify_self(ce, regs, ARRAY_SIZE(regs));
+ if (err)
+ return err;
+ }
return 0;
}
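gen8_load_flex() above emits a single MI_LOAD_REGISTER_IMM packet: a header whose length field covers the (register, value) pairs, the pairs themselves, then an MI_NOOP to keep the stream an even number of dwords. A sketch of the emission into a plain dword array; the encodings and register offsets are illustrative, the real ones live in the i915 command headers:

#include <stdint.h>
#include <stdio.h>

typedef uint32_t u32;

struct flex { u32 reg_offset; u32 value; };

/* Illustrative encoding, modelled on MI_INSTR(0x22, 2*n - 1). */
#define MI_LOAD_REGISTER_IMM(n) ((0x22u << 23) | (2u * (n) - 1))
#define MI_NOOP 0u

static u32 *emit_lri(u32 *cs, const struct flex *flex, unsigned int count)
{
	*cs++ = MI_LOAD_REGISTER_IMM(count);
	do {
		*cs++ = flex->reg_offset;
		*cs++ = flex->value;
	} while (flex++, --count);
	*cs++ = MI_NOOP; /* pad to an even number of dwords */
	return cs;
}

int main(void)
{
	u32 ring[16], *end;
	const struct flex regs[] = { { 0x9420, 0x1 }, { 0x9424, 0x2 } };

	end = emit_lri(ring, regs, 2);
	for (u32 *p = ring; p < end; p++)
		printf("%08x\n", *p);
	return 0;
}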
@@ -1836,7 +1982,7 @@ static int gen8_enable_metric_set(struct i915_perf_stream *stream)
* to make sure all slices/subslices are ON before writing to NOA
* registers.
*/
- ret = gen8_configure_all_contexts(dev_priv, oa_config);
+ ret = gen8_configure_all_contexts(stream, oa_config);
if (ret)
return ret;
@@ -1849,19 +1995,23 @@ static int gen8_enable_metric_set(struct i915_perf_stream *stream)
return 0;
}
-static void gen8_disable_metric_set(struct drm_i915_private *dev_priv)
+static void gen8_disable_metric_set(struct i915_perf_stream *stream)
{
+ struct drm_i915_private *dev_priv = stream->dev_priv;
+
/* Reset all contexts' slices/subslices configurations. */
- gen8_configure_all_contexts(dev_priv, NULL);
+ gen8_configure_all_contexts(stream, NULL);
I915_WRITE(GDT_CHICKEN_BITS, (I915_READ(GDT_CHICKEN_BITS) &
~GT_NOA_ENABLE));
}
-static void gen10_disable_metric_set(struct drm_i915_private *dev_priv)
+static void gen10_disable_metric_set(struct i915_perf_stream *stream)
{
+ struct drm_i915_private *dev_priv = stream->dev_priv;
+
/* Reset all contexts' slices/subslices configurations. */
- gen8_configure_all_contexts(dev_priv, NULL);
+ gen8_configure_all_contexts(stream, NULL);
/* Make sure we disable noa to save power. */
I915_WRITE(RPM_CONFIG1,
@@ -1872,10 +2022,10 @@ static void gen7_oa_enable(struct i915_perf_stream *stream)
{
struct drm_i915_private *dev_priv = stream->dev_priv;
struct i915_gem_context *ctx = stream->ctx;
- u32 ctx_id = dev_priv->perf.oa.specific_ctx_id;
- bool periodic = dev_priv->perf.oa.periodic;
- u32 period_exponent = dev_priv->perf.oa.period_exponent;
- u32 report_format = dev_priv->perf.oa.oa_buffer.format;
+ u32 ctx_id = stream->specific_ctx_id;
+ bool periodic = stream->periodic;
+ u32 period_exponent = stream->period_exponent;
+ u32 report_format = stream->oa_buffer.format;
/*
* Reset buf pointers so we don't forward reports from before now.
@@ -1886,7 +2036,7 @@ static void gen7_oa_enable(struct i915_perf_stream *stream)
* on the assumption that certain fields are written to zeroed
* memory which this helps maintains.
*/
- gen7_init_oa_buffer(dev_priv);
+ gen7_init_oa_buffer(stream);
I915_WRITE(GEN7_OACONTROL,
(ctx_id & GEN7_OACONTROL_CTX_MASK) |
@@ -1901,7 +2051,7 @@ static void gen7_oa_enable(struct i915_perf_stream *stream)
static void gen8_oa_enable(struct i915_perf_stream *stream)
{
struct drm_i915_private *dev_priv = stream->dev_priv;
- u32 report_format = dev_priv->perf.oa.oa_buffer.format;
+ u32 report_format = stream->oa_buffer.format;
/*
* Reset buf pointers so we don't forward reports from before now.
@@ -1912,7 +2062,7 @@ static void gen8_oa_enable(struct i915_perf_stream *stream)
* on the assumption that certain fields are written to zeroed
* memory, which this helps maintain.
*/
- gen8_init_oa_buffer(dev_priv);
+ gen8_init_oa_buffer(stream);
/*
* Note: we don't rely on the hardware to perform single context
@@ -1937,10 +2087,10 @@ static void i915_oa_stream_enable(struct i915_perf_stream *stream)
{
struct drm_i915_private *dev_priv = stream->dev_priv;
- dev_priv->perf.oa.ops.oa_enable(stream);
+ dev_priv->perf.ops.oa_enable(stream);
- if (dev_priv->perf.oa.periodic)
- hrtimer_start(&dev_priv->perf.oa.poll_check_timer,
+ if (stream->periodic)
+ hrtimer_start(&stream->poll_check_timer,
ns_to_ktime(POLL_PERIOD),
HRTIMER_MODE_REL_PINNED);
}
@@ -1979,10 +2129,10 @@ static void i915_oa_stream_disable(struct i915_perf_stream *stream)
{
struct drm_i915_private *dev_priv = stream->dev_priv;
- dev_priv->perf.oa.ops.oa_disable(stream);
+ dev_priv->perf.ops.oa_disable(stream);
- if (dev_priv->perf.oa.periodic)
- hrtimer_cancel(&dev_priv->perf.oa.poll_check_timer);
+ if (stream->periodic)
+ hrtimer_cancel(&stream->poll_check_timer);
}
static const struct i915_perf_stream_ops i915_oa_stream_ops = {
@@ -2034,7 +2184,7 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,
return -EINVAL;
}
- if (!dev_priv->perf.oa.ops.enable_metric_set) {
+ if (!dev_priv->perf.ops.enable_metric_set) {
DRM_DEBUG("OA unit not supported\n");
return -ENODEV;
}
@@ -2043,7 +2193,7 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,
* counter reports and marshal to the appropriate client
* we currently only allow exclusive access
*/
- if (dev_priv->perf.oa.exclusive_stream) {
+ if (dev_priv->perf.exclusive_stream) {
DRM_DEBUG("OA unit already in use\n");
return -EBUSY;
}
@@ -2053,43 +2203,23 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,
return -EINVAL;
}
- /* We set up some ratelimit state to potentially throttle any _NOTES
- * about spurious, invalid OA reports which we don't forward to
- * userspace.
- *
- * The initialization is associated with opening the stream (not driver
- * init) considering we print a _NOTE about any throttling when closing
- * the stream instead of waiting until driver _fini which no one would
- * ever see.
- *
- * Using the same limiting factors as printk_ratelimit()
- */
- ratelimit_state_init(&dev_priv->perf.oa.spurious_report_rs,
- 5 * HZ, 10);
- /* Since we use a DRM_NOTE for spurious reports it would be
- * inconsistent to let __ratelimit() automatically print a warning for
- * throttling.
- */
- ratelimit_set_flags(&dev_priv->perf.oa.spurious_report_rs,
- RATELIMIT_MSG_ON_RELEASE);
-
stream->sample_size = sizeof(struct drm_i915_perf_record_header);
- format_size = dev_priv->perf.oa.oa_formats[props->oa_format].size;
+ format_size = dev_priv->perf.oa_formats[props->oa_format].size;
stream->sample_flags |= SAMPLE_OA_REPORT;
stream->sample_size += format_size;
- dev_priv->perf.oa.oa_buffer.format_size = format_size;
- if (WARN_ON(dev_priv->perf.oa.oa_buffer.format_size == 0))
+ stream->oa_buffer.format_size = format_size;
+ if (WARN_ON(stream->oa_buffer.format_size == 0))
return -EINVAL;
- dev_priv->perf.oa.oa_buffer.format =
- dev_priv->perf.oa.oa_formats[props->oa_format].format;
+ stream->oa_buffer.format =
+ dev_priv->perf.oa_formats[props->oa_format].format;
- dev_priv->perf.oa.periodic = props->oa_periodic;
- if (dev_priv->perf.oa.periodic)
- dev_priv->perf.oa.period_exponent = props->oa_period_exponent;
+ stream->periodic = props->oa_periodic;
+ if (stream->periodic)
+ stream->period_exponent = props->oa_period_exponent;
if (stream->ctx) {
ret = oa_get_render_ctx_id(stream);
@@ -2120,7 +2250,7 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,
stream->wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
- ret = alloc_oa_buffer(dev_priv);
+ ret = alloc_oa_buffer(stream);
if (ret)
goto err_oa_buf_alloc;
@@ -2129,9 +2259,9 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,
goto err_lock;
stream->ops = &i915_oa_stream_ops;
- dev_priv->perf.oa.exclusive_stream = stream;
+ dev_priv->perf.exclusive_stream = stream;
- ret = dev_priv->perf.oa.ops.enable_metric_set(stream);
+ ret = dev_priv->perf.ops.enable_metric_set(stream);
if (ret) {
DRM_DEBUG("Unable to enable metric set\n");
goto err_enable;
@@ -2139,15 +2269,21 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,
mutex_unlock(&dev_priv->drm.struct_mutex);
+ hrtimer_init(&stream->poll_check_timer,
+ CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ stream->poll_check_timer.function = oa_poll_check_timer_cb;
+ init_waitqueue_head(&stream->poll_wq);
+ spin_lock_init(&stream->oa_buffer.ptr_lock);
+
return 0;
err_enable:
- dev_priv->perf.oa.exclusive_stream = NULL;
- dev_priv->perf.oa.ops.disable_metric_set(dev_priv);
+ dev_priv->perf.exclusive_stream = NULL;
+ dev_priv->perf.ops.disable_metric_set(stream);
mutex_unlock(&dev_priv->drm.struct_mutex);
err_lock:
- free_oa_buffer(dev_priv);
+ free_oa_buffer(stream);
err_oa_buf_alloc:
put_oa_config(dev_priv, stream->oa_config);
@@ -2171,9 +2307,9 @@ void i915_oa_init_reg_state(struct intel_engine_cs *engine,
if (engine->class != RENDER_CLASS)
return;
- stream = engine->i915->perf.oa.exclusive_stream;
+ stream = engine->i915->perf.exclusive_stream;
if (stream)
- gen8_update_reg_state_unlocked(ce, regs, stream->oa_config);
+ gen8_update_reg_state_unlocked(stream, ce, regs, stream->oa_config);
}
/**
@@ -2289,7 +2425,7 @@ static ssize_t i915_perf_read(struct file *file,
/* Maybe make ->pollin per-stream state if we support multiple
* concurrent streams in the future.
*/
- dev_priv->perf.oa.pollin = false;
+ stream->pollin = false;
}
return ret;
@@ -2297,13 +2433,12 @@ static ssize_t i915_perf_read(struct file *file,
static enum hrtimer_restart oa_poll_check_timer_cb(struct hrtimer *hrtimer)
{
- struct drm_i915_private *dev_priv =
- container_of(hrtimer, typeof(*dev_priv),
- perf.oa.poll_check_timer);
+ struct i915_perf_stream *stream =
+ container_of(hrtimer, typeof(*stream), poll_check_timer);
- if (oa_buffer_check_unlocked(dev_priv)) {
- dev_priv->perf.oa.pollin = true;
- wake_up(&dev_priv->perf.oa.poll_wq);
+ if (oa_buffer_check_unlocked(stream)) {
+ stream->pollin = true;
+ wake_up(&stream->poll_wq);
}
hrtimer_forward_now(hrtimer, ns_to_ktime(POLL_PERIOD));
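[Editor's note: the callback above now recovers its enclosing stream via container_of() on the embedded hrtimer, instead of reaching through a device-global. A minimal, self-contained sketch of that pattern — the struct and member names here are illustrative, not the driver's:

	#include <stddef.h>
	#include <stdio.h>

	/* same definition the kernel uses in <linux/kernel.h> */
	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct timer { int dummy; };

	struct stream {
		int pollin;
		struct timer poll_check_timer;	/* embedded member */
	};

	static void timer_cb(struct timer *t)
	{
		/* recover the enclosing stream from the embedded timer */
		struct stream *s = container_of(t, struct stream, poll_check_timer);
		s->pollin = 1;
	}

	int main(void)
	{
		struct stream s = { 0 };
		timer_cb(&s.poll_check_timer);
		printf("pollin = %d\n", s.pollin);	/* prints 1 */
		return 0;
	}
]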
@@ -2342,7 +2477,7 @@ static __poll_t i915_perf_poll_locked(struct drm_i915_private *dev_priv,
* the hrtimer/oa_poll_check_timer_cb to notify us when there are
* samples to read.
*/
- if (dev_priv->perf.oa.pollin)
+ if (stream->pollin)
events |= EPOLLIN;
return events;
@@ -2768,7 +2903,7 @@ static int read_properties_unlocked(struct drm_i915_private *dev_priv,
value);
return -EINVAL;
}
- if (!dev_priv->perf.oa.oa_formats[value].size) {
+ if (!dev_priv->perf.oa_formats[value].size) {
DRM_DEBUG("Unsupported OA report format %llu\n",
value);
return -EINVAL;
@@ -2912,7 +3047,7 @@ void i915_perf_register(struct drm_i915_private *dev_priv)
if (!dev_priv->perf.metrics_kobj)
goto exit;
- sysfs_attr_init(&dev_priv->perf.oa.test_config.sysfs_metric_id.attr);
+ sysfs_attr_init(&dev_priv->perf.test_config.sysfs_metric_id.attr);
if (INTEL_GEN(dev_priv) >= 11) {
i915_perf_load_test_config_icl(dev_priv);
@@ -2947,15 +3082,15 @@ void i915_perf_register(struct drm_i915_private *dev_priv)
i915_perf_load_test_config_hsw(dev_priv);
}
- if (dev_priv->perf.oa.test_config.id == 0)
+ if (dev_priv->perf.test_config.id == 0)
goto sysfs_error;
ret = sysfs_create_group(dev_priv->perf.metrics_kobj,
- &dev_priv->perf.oa.test_config.sysfs_metric);
+ &dev_priv->perf.test_config.sysfs_metric);
if (ret)
goto sysfs_error;
- atomic_set(&dev_priv->perf.oa.test_config.ref_count, 1);
+ atomic_set(&dev_priv->perf.test_config.ref_count, 1);
goto exit;
@@ -2982,7 +3117,7 @@ void i915_perf_unregister(struct drm_i915_private *dev_priv)
return;
sysfs_remove_group(dev_priv->perf.metrics_kobj,
- &dev_priv->perf.oa.test_config.sysfs_metric);
+ &dev_priv->perf.test_config.sysfs_metric);
kobject_put(dev_priv->perf.metrics_kobj);
dev_priv->perf.metrics_kobj = NULL;
@@ -3227,7 +3362,7 @@ int i915_perf_add_config_ioctl(struct drm_device *dev, void *data,
oa_config->mux_regs_len = args->n_mux_regs;
oa_config->mux_regs =
alloc_oa_regs(dev_priv,
- dev_priv->perf.oa.ops.is_valid_mux_reg,
+ dev_priv->perf.ops.is_valid_mux_reg,
u64_to_user_ptr(args->mux_regs_ptr),
args->n_mux_regs);
@@ -3240,7 +3375,7 @@ int i915_perf_add_config_ioctl(struct drm_device *dev, void *data,
oa_config->b_counter_regs_len = args->n_boolean_regs;
oa_config->b_counter_regs =
alloc_oa_regs(dev_priv,
- dev_priv->perf.oa.ops.is_valid_b_counter_reg,
+ dev_priv->perf.ops.is_valid_b_counter_reg,
u64_to_user_ptr(args->boolean_regs_ptr),
args->n_boolean_regs);
@@ -3259,7 +3394,7 @@ int i915_perf_add_config_ioctl(struct drm_device *dev, void *data,
oa_config->flex_regs_len = args->n_flex_regs;
oa_config->flex_regs =
alloc_oa_regs(dev_priv,
- dev_priv->perf.oa.ops.is_valid_flex_reg,
+ dev_priv->perf.ops.is_valid_flex_reg,
u64_to_user_ptr(args->flex_regs_ptr),
args->n_flex_regs);
@@ -3426,20 +3561,20 @@ static struct ctl_table dev_root[] = {
void i915_perf_init(struct drm_i915_private *dev_priv)
{
if (IS_HASWELL(dev_priv)) {
- dev_priv->perf.oa.ops.is_valid_b_counter_reg =
+ dev_priv->perf.ops.is_valid_b_counter_reg =
gen7_is_valid_b_counter_addr;
- dev_priv->perf.oa.ops.is_valid_mux_reg =
+ dev_priv->perf.ops.is_valid_mux_reg =
hsw_is_valid_mux_addr;
- dev_priv->perf.oa.ops.is_valid_flex_reg = NULL;
- dev_priv->perf.oa.ops.enable_metric_set = hsw_enable_metric_set;
- dev_priv->perf.oa.ops.disable_metric_set = hsw_disable_metric_set;
- dev_priv->perf.oa.ops.oa_enable = gen7_oa_enable;
- dev_priv->perf.oa.ops.oa_disable = gen7_oa_disable;
- dev_priv->perf.oa.ops.read = gen7_oa_read;
- dev_priv->perf.oa.ops.oa_hw_tail_read =
+ dev_priv->perf.ops.is_valid_flex_reg = NULL;
+ dev_priv->perf.ops.enable_metric_set = hsw_enable_metric_set;
+ dev_priv->perf.ops.disable_metric_set = hsw_disable_metric_set;
+ dev_priv->perf.ops.oa_enable = gen7_oa_enable;
+ dev_priv->perf.ops.oa_disable = gen7_oa_disable;
+ dev_priv->perf.ops.read = gen7_oa_read;
+ dev_priv->perf.ops.oa_hw_tail_read =
gen7_oa_hw_tail_read;
- dev_priv->perf.oa.oa_formats = hsw_oa_formats;
+ dev_priv->perf.oa_formats = hsw_oa_formats;
} else if (HAS_LOGICAL_RING_CONTEXTS(dev_priv)) {
/* Note that although we could theoretically also support the
* legacy ringbuffer mode on BDW (and earlier iterations of
@@ -3447,71 +3582,65 @@ void i915_perf_init(struct drm_i915_private *dev_priv)
* worth the complexity to maintain now that BDW+ enable
* execlist mode by default.
*/
- dev_priv->perf.oa.oa_formats = gen8_plus_oa_formats;
+ dev_priv->perf.oa_formats = gen8_plus_oa_formats;
- dev_priv->perf.oa.ops.oa_enable = gen8_oa_enable;
- dev_priv->perf.oa.ops.oa_disable = gen8_oa_disable;
- dev_priv->perf.oa.ops.read = gen8_oa_read;
- dev_priv->perf.oa.ops.oa_hw_tail_read = gen8_oa_hw_tail_read;
+ dev_priv->perf.ops.oa_enable = gen8_oa_enable;
+ dev_priv->perf.ops.oa_disable = gen8_oa_disable;
+ dev_priv->perf.ops.read = gen8_oa_read;
+ dev_priv->perf.ops.oa_hw_tail_read = gen8_oa_hw_tail_read;
if (IS_GEN_RANGE(dev_priv, 8, 9)) {
- dev_priv->perf.oa.ops.is_valid_b_counter_reg =
+ dev_priv->perf.ops.is_valid_b_counter_reg =
gen7_is_valid_b_counter_addr;
- dev_priv->perf.oa.ops.is_valid_mux_reg =
+ dev_priv->perf.ops.is_valid_mux_reg =
gen8_is_valid_mux_addr;
- dev_priv->perf.oa.ops.is_valid_flex_reg =
+ dev_priv->perf.ops.is_valid_flex_reg =
gen8_is_valid_flex_addr;
if (IS_CHERRYVIEW(dev_priv)) {
- dev_priv->perf.oa.ops.is_valid_mux_reg =
+ dev_priv->perf.ops.is_valid_mux_reg =
chv_is_valid_mux_addr;
}
- dev_priv->perf.oa.ops.enable_metric_set = gen8_enable_metric_set;
- dev_priv->perf.oa.ops.disable_metric_set = gen8_disable_metric_set;
+ dev_priv->perf.ops.enable_metric_set = gen8_enable_metric_set;
+ dev_priv->perf.ops.disable_metric_set = gen8_disable_metric_set;
if (IS_GEN(dev_priv, 8)) {
- dev_priv->perf.oa.ctx_oactxctrl_offset = 0x120;
- dev_priv->perf.oa.ctx_flexeu0_offset = 0x2ce;
+ dev_priv->perf.ctx_oactxctrl_offset = 0x120;
+ dev_priv->perf.ctx_flexeu0_offset = 0x2ce;
- dev_priv->perf.oa.gen8_valid_ctx_bit = (1<<25);
+ dev_priv->perf.gen8_valid_ctx_bit = BIT(25);
} else {
- dev_priv->perf.oa.ctx_oactxctrl_offset = 0x128;
- dev_priv->perf.oa.ctx_flexeu0_offset = 0x3de;
+ dev_priv->perf.ctx_oactxctrl_offset = 0x128;
+ dev_priv->perf.ctx_flexeu0_offset = 0x3de;
- dev_priv->perf.oa.gen8_valid_ctx_bit = (1<<16);
+ dev_priv->perf.gen8_valid_ctx_bit = BIT(16);
}
} else if (IS_GEN_RANGE(dev_priv, 10, 11)) {
- dev_priv->perf.oa.ops.is_valid_b_counter_reg =
+ dev_priv->perf.ops.is_valid_b_counter_reg =
gen7_is_valid_b_counter_addr;
- dev_priv->perf.oa.ops.is_valid_mux_reg =
+ dev_priv->perf.ops.is_valid_mux_reg =
gen10_is_valid_mux_addr;
- dev_priv->perf.oa.ops.is_valid_flex_reg =
+ dev_priv->perf.ops.is_valid_flex_reg =
gen8_is_valid_flex_addr;
- dev_priv->perf.oa.ops.enable_metric_set = gen8_enable_metric_set;
- dev_priv->perf.oa.ops.disable_metric_set = gen10_disable_metric_set;
+ dev_priv->perf.ops.enable_metric_set = gen8_enable_metric_set;
+ dev_priv->perf.ops.disable_metric_set = gen10_disable_metric_set;
if (IS_GEN(dev_priv, 10)) {
- dev_priv->perf.oa.ctx_oactxctrl_offset = 0x128;
- dev_priv->perf.oa.ctx_flexeu0_offset = 0x3de;
+ dev_priv->perf.ctx_oactxctrl_offset = 0x128;
+ dev_priv->perf.ctx_flexeu0_offset = 0x3de;
} else {
- dev_priv->perf.oa.ctx_oactxctrl_offset = 0x124;
- dev_priv->perf.oa.ctx_flexeu0_offset = 0x78e;
+ dev_priv->perf.ctx_oactxctrl_offset = 0x124;
+ dev_priv->perf.ctx_flexeu0_offset = 0x78e;
}
- dev_priv->perf.oa.gen8_valid_ctx_bit = (1<<16);
+ dev_priv->perf.gen8_valid_ctx_bit = BIT(16);
}
}
- if (dev_priv->perf.oa.ops.enable_metric_set) {
- hrtimer_init(&dev_priv->perf.oa.poll_check_timer,
- CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- dev_priv->perf.oa.poll_check_timer.function = oa_poll_check_timer_cb;
- init_waitqueue_head(&dev_priv->perf.oa.poll_wq);
-
+ if (dev_priv->perf.ops.enable_metric_set) {
INIT_LIST_HEAD(&dev_priv->perf.streams);
mutex_init(&dev_priv->perf.lock);
- spin_lock_init(&dev_priv->perf.oa.oa_buffer.ptr_lock);
oa_sample_rate_hard_limit = 1000 *
(RUNTIME_INFO(dev_priv)->cs_timestamp_frequency_khz / 2);
@@ -3520,6 +3649,25 @@ void i915_perf_init(struct drm_i915_private *dev_priv)
mutex_init(&dev_priv->perf.metrics_lock);
idr_init(&dev_priv->perf.metrics_idr);
+ /* We set up some ratelimit state to potentially throttle any
+ * _NOTES about spurious, invalid OA reports which we don't
+ * forward to userspace.
+ *
+ * We print a _NOTE about any throttling when closing the
+ * stream instead of waiting until driver _fini which no one
+ * would ever see.
+ *
+ * Using the same limiting factors as printk_ratelimit()
+ */
+ ratelimit_state_init(&dev_priv->perf.spurious_report_rs,
+ 5 * HZ, 10);
+ /* Since we use a DRM_NOTE for spurious reports it would be
+ * inconsistent to let __ratelimit() automatically print a
+ * warning for throttling.
+ */
+ ratelimit_set_flags(&dev_priv->perf.spurious_report_rs,
+ RATELIMIT_MSG_ON_RELEASE);
+
dev_priv->perf.initialized = true;
}
}
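[Editor's note: the consuming side of the spurious-report ratelimit initialised above follows the usual <linux/ratelimit.h> pattern. A hedged sketch — the exact call site and message are not part of this hunk:

	/* gate the DRM_NOTE behind the ratelimit state set up in init;
	 * RATELIMIT_MSG_ON_RELEASE defers the "N callbacks suppressed"
	 * message until the state is released when the stream closes */
	if (__ratelimit(&dev_priv->perf.spurious_report_rs))
		DRM_NOTE("Skipping spurious, invalid OA report\n");
]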
@@ -3548,7 +3696,7 @@ void i915_perf_fini(struct drm_i915_private *dev_priv)
unregister_sysctl_table(dev_priv->perf.sysctl_header);
- memset(&dev_priv->perf.oa.ops, 0, sizeof(dev_priv->perf.oa.ops));
+ memset(&dev_priv->perf.ops, 0, sizeof(dev_priv->perf.ops));
dev_priv->perf.initialized = false;
}
diff --git a/drivers/gpu/drm/i915/i915_perf.h b/drivers/gpu/drm/i915/i915_perf.h
new file mode 100644
index 000000000000..a412b16d9ffc
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_perf.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __I915_PERF_H__
+#define __I915_PERF_H__
+
+#include <linux/types.h>
+
+struct drm_device;
+struct drm_file;
+struct drm_i915_private;
+struct intel_context;
+struct intel_engine_cs;
+
+void i915_perf_init(struct drm_i915_private *i915);
+void i915_perf_fini(struct drm_i915_private *i915);
+void i915_perf_register(struct drm_i915_private *i915);
+void i915_perf_unregister(struct drm_i915_private *i915);
+
+int i915_perf_open_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file);
+int i915_perf_add_config_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file);
+int i915_perf_remove_config_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file);
+void i915_oa_init_reg_state(struct intel_engine_cs *engine,
+ struct intel_context *ce,
+ u32 *reg_state);
+
+#endif /* __I915_PERF_H__ */
diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c
index 8fe46ee920a0..8e251e719390 100644
--- a/drivers/gpu/drm/i915/i915_pmu.c
+++ b/drivers/gpu/drm/i915/i915_pmu.c
@@ -8,6 +8,9 @@
#include <linux/pm_runtime.h>
#include "gt/intel_engine.h"
+#include "gt/intel_engine_pm.h"
+#include "gt/intel_engine_user.h"
+#include "gt/intel_gt_pm.h"
#include "i915_drv.h"
#include "i915_pmu.h"
@@ -74,8 +77,9 @@ static unsigned int event_enabled_bit(struct perf_event *event)
return config_enabled_bit(event->attr.config);
}
-static bool pmu_needs_timer(struct drm_i915_private *i915, bool gpu_active)
+static bool pmu_needs_timer(struct i915_pmu *pmu, bool gpu_active)
{
+ struct drm_i915_private *i915 = container_of(pmu, typeof(*i915), pmu);
u64 enable;
/*
@@ -83,7 +87,7 @@ static bool pmu_needs_timer(struct drm_i915_private *i915, bool gpu_active)
*
* We start with a bitmask of all currently enabled events.
*/
- enable = i915->pmu.enable;
+ enable = pmu->enable;
/*
* Mask out all the ones which do not need the timer, or in
@@ -102,10 +106,8 @@ static bool pmu_needs_timer(struct drm_i915_private *i915, bool gpu_active)
/*
* Also, when software busyness tracking is available we do not
* need the timer for the I915_SAMPLE_BUSY counter.
- *
- * Use RCS as proxy for all engines.
*/
- else if (intel_engine_supports_stats(i915->engine[RCS0]))
+ else if (i915->caps.scheduler & I915_SCHEDULER_CAP_ENGINE_BUSY_STATS)
enable &= ~BIT(I915_SAMPLE_BUSY);
/*
@@ -116,24 +118,26 @@ static bool pmu_needs_timer(struct drm_i915_private *i915, bool gpu_active)
void i915_pmu_gt_parked(struct drm_i915_private *i915)
{
- if (!i915->pmu.base.event_init)
+ struct i915_pmu *pmu = &i915->pmu;
+
+ if (!pmu->base.event_init)
return;
- spin_lock_irq(&i915->pmu.lock);
+ spin_lock_irq(&pmu->lock);
/*
* Signal sampling timer to stop if only engine events are enabled and
* GPU went idle.
*/
- i915->pmu.timer_enabled = pmu_needs_timer(i915, false);
- spin_unlock_irq(&i915->pmu.lock);
+ pmu->timer_enabled = pmu_needs_timer(pmu, false);
+ spin_unlock_irq(&pmu->lock);
}
-static void __i915_pmu_maybe_start_timer(struct drm_i915_private *i915)
+static void __i915_pmu_maybe_start_timer(struct i915_pmu *pmu)
{
- if (!i915->pmu.timer_enabled && pmu_needs_timer(i915, true)) {
- i915->pmu.timer_enabled = true;
- i915->pmu.timer_last = ktime_get();
- hrtimer_start_range_ns(&i915->pmu.timer,
+ if (!pmu->timer_enabled && pmu_needs_timer(pmu, true)) {
+ pmu->timer_enabled = true;
+ pmu->timer_last = ktime_get();
+ hrtimer_start_range_ns(&pmu->timer,
ns_to_ktime(PERIOD), 0,
HRTIMER_MODE_REL_PINNED);
}
@@ -141,15 +145,17 @@ static void __i915_pmu_maybe_start_timer(struct drm_i915_private *i915)
void i915_pmu_gt_unparked(struct drm_i915_private *i915)
{
- if (!i915->pmu.base.event_init)
+ struct i915_pmu *pmu = &i915->pmu;
+
+ if (!pmu->base.event_init)
return;
- spin_lock_irq(&i915->pmu.lock);
+ spin_lock_irq(&pmu->lock);
/*
* Re-enable sampling timer when GPU goes active.
*/
- __i915_pmu_maybe_start_timer(i915);
- spin_unlock_irq(&i915->pmu.lock);
+ __i915_pmu_maybe_start_timer(pmu);
+ spin_unlock_irq(&pmu->lock);
}
static void
@@ -159,32 +165,30 @@ add_sample(struct i915_pmu_sample *sample, u32 val)
}
static void
-engines_sample(struct drm_i915_private *dev_priv, unsigned int period_ns)
+engines_sample(struct intel_gt *gt, unsigned int period_ns)
{
+ struct drm_i915_private *i915 = gt->i915;
struct intel_engine_cs *engine;
enum intel_engine_id id;
- intel_wakeref_t wakeref;
- unsigned long flags;
- if ((dev_priv->pmu.enable & ENGINE_SAMPLE_MASK) == 0)
+ if ((i915->pmu.enable & ENGINE_SAMPLE_MASK) == 0)
return;
- wakeref = 0;
- if (READ_ONCE(dev_priv->gt.awake))
- wakeref = intel_runtime_pm_get_if_in_use(&dev_priv->runtime_pm);
- if (!wakeref)
- return;
-
- spin_lock_irqsave(&dev_priv->uncore.lock, flags);
- for_each_engine(engine, dev_priv, id) {
+ for_each_engine(engine, i915, id) {
struct intel_engine_pmu *pmu = &engine->pmu;
+ unsigned long flags;
bool busy;
u32 val;
- val = I915_READ_FW(RING_CTL(engine->mmio_base));
- if (val == 0) /* powerwell off => engine idle */
+ if (!intel_engine_pm_get_if_awake(engine))
continue;
+ spin_lock_irqsave(&engine->uncore->lock, flags);
+
+ val = ENGINE_READ_FW(engine, RING_CTL);
+ if (val == 0) /* powerwell off => engine idle */
+ goto skip;
+
if (val & RING_WAIT)
add_sample(&pmu->sample[I915_SAMPLE_WAIT], period_ns);
if (val & RING_WAIT_SEMAPHORE)
@@ -199,15 +203,16 @@ engines_sample(struct drm_i915_private *dev_priv, unsigned int period_ns)
*/
busy = val & (RING_WAIT_SEMAPHORE | RING_WAIT);
if (!busy) {
- val = I915_READ_FW(RING_MI_MODE(engine->mmio_base));
+ val = ENGINE_READ_FW(engine, RING_MI_MODE);
busy = !(val & MODE_IDLE);
}
if (busy)
add_sample(&pmu->sample[I915_SAMPLE_BUSY], period_ns);
- }
- spin_unlock_irqrestore(&dev_priv->uncore.lock, flags);
- intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
+skip:
+ spin_unlock_irqrestore(&engine->uncore->lock, flags);
+ intel_engine_pm_put(engine);
+ }
}
static void
@@ -217,34 +222,30 @@ add_sample_mult(struct i915_pmu_sample *sample, u32 val, u32 mul)
}
static void
-frequency_sample(struct drm_i915_private *dev_priv, unsigned int period_ns)
+frequency_sample(struct intel_gt *gt, unsigned int period_ns)
{
- if (dev_priv->pmu.enable &
- config_enabled_mask(I915_PMU_ACTUAL_FREQUENCY)) {
- u32 val;
+ struct drm_i915_private *i915 = gt->i915;
+ struct intel_uncore *uncore = gt->uncore;
+ struct i915_pmu *pmu = &i915->pmu;
- val = dev_priv->gt_pm.rps.cur_freq;
- if (dev_priv->gt.awake) {
- intel_wakeref_t wakeref;
+ if (pmu->enable & config_enabled_mask(I915_PMU_ACTUAL_FREQUENCY)) {
+ u32 val;
- with_intel_runtime_pm_if_in_use(&dev_priv->runtime_pm,
- wakeref) {
- val = intel_uncore_read_notrace(&dev_priv->uncore,
- GEN6_RPSTAT1);
- val = intel_get_cagf(dev_priv, val);
- }
+ val = i915->gt_pm.rps.cur_freq;
+ if (intel_gt_pm_get_if_awake(gt)) {
+ val = intel_uncore_read_notrace(uncore, GEN6_RPSTAT1);
+ val = intel_get_cagf(i915, val);
+ intel_gt_pm_put(gt);
}
- add_sample_mult(&dev_priv->pmu.sample[__I915_SAMPLE_FREQ_ACT],
- intel_gpu_freq(dev_priv, val),
+ add_sample_mult(&pmu->sample[__I915_SAMPLE_FREQ_ACT],
+ intel_gpu_freq(i915, val),
period_ns / 1000);
}
- if (dev_priv->pmu.enable &
- config_enabled_mask(I915_PMU_REQUESTED_FREQUENCY)) {
- add_sample_mult(&dev_priv->pmu.sample[__I915_SAMPLE_FREQ_REQ],
- intel_gpu_freq(dev_priv,
- dev_priv->gt_pm.rps.cur_freq),
+ if (pmu->enable & config_enabled_mask(I915_PMU_REQUESTED_FREQUENCY)) {
+ add_sample_mult(&pmu->sample[__I915_SAMPLE_FREQ_REQ],
+ intel_gpu_freq(i915, i915->gt_pm.rps.cur_freq),
period_ns / 1000);
}
}
@@ -253,15 +254,17 @@ static enum hrtimer_restart i915_sample(struct hrtimer *hrtimer)
{
struct drm_i915_private *i915 =
container_of(hrtimer, struct drm_i915_private, pmu.timer);
+ struct i915_pmu *pmu = &i915->pmu;
+ struct intel_gt *gt = &i915->gt;
unsigned int period_ns;
ktime_t now;
- if (!READ_ONCE(i915->pmu.timer_enabled))
+ if (!READ_ONCE(pmu->timer_enabled))
return HRTIMER_NORESTART;
now = ktime_get();
- period_ns = ktime_to_ns(ktime_sub(now, i915->pmu.timer_last));
- i915->pmu.timer_last = now;
+ period_ns = ktime_to_ns(ktime_sub(now, pmu->timer_last));
+ pmu->timer_last = now;
/*
* Strictly speaking the passed in period may not be 100% accurate for
@@ -269,8 +272,8 @@ static enum hrtimer_restart i915_sample(struct hrtimer *hrtimer)
* grabbing the forcewake. However the potential error from timer call-
* back delay greatly dominates this so we keep it simple.
*/
- engines_sample(i915, period_ns);
- frequency_sample(i915, period_ns);
+ engines_sample(gt, period_ns);
+ frequency_sample(gt, period_ns);
hrtimer_forward(hrtimer, now, ns_to_ktime(PERIOD));
@@ -423,8 +426,9 @@ static int i915_pmu_event_init(struct perf_event *event)
return 0;
}
-static u64 __get_rc6(struct drm_i915_private *i915)
+static u64 __get_rc6(struct intel_gt *gt)
{
+ struct drm_i915_private *i915 = gt->i915;
u64 val;
val = intel_rc6_residency_ns(i915,
@@ -441,17 +445,19 @@ static u64 __get_rc6(struct drm_i915_private *i915)
return val;
}
-static u64 get_rc6(struct drm_i915_private *i915)
+static u64 get_rc6(struct intel_gt *gt)
{
#if IS_ENABLED(CONFIG_PM)
+ struct drm_i915_private *i915 = gt->i915;
struct intel_runtime_pm *rpm = &i915->runtime_pm;
+ struct i915_pmu *pmu = &i915->pmu;
intel_wakeref_t wakeref;
unsigned long flags;
u64 val;
wakeref = intel_runtime_pm_get_if_in_use(rpm);
if (wakeref) {
- val = __get_rc6(i915);
+ val = __get_rc6(gt);
intel_runtime_pm_put(rpm, wakeref);
/*
@@ -460,16 +466,16 @@ static u64 get_rc6(struct drm_i915_private *i915)
* previously.
*/
- spin_lock_irqsave(&i915->pmu.lock, flags);
+ spin_lock_irqsave(&pmu->lock, flags);
- if (val >= i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur) {
- i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur = 0;
- i915->pmu.sample[__I915_SAMPLE_RC6].cur = val;
+ if (val >= pmu->sample[__I915_SAMPLE_RC6_ESTIMATED].cur) {
+ pmu->sample[__I915_SAMPLE_RC6_ESTIMATED].cur = 0;
+ pmu->sample[__I915_SAMPLE_RC6].cur = val;
} else {
- val = i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur;
+ val = pmu->sample[__I915_SAMPLE_RC6_ESTIMATED].cur;
}
- spin_unlock_irqrestore(&i915->pmu.lock, flags);
+ spin_unlock_irqrestore(&pmu->lock, flags);
} else {
struct device *kdev = rpm->kdev;
@@ -480,7 +486,7 @@ static u64 get_rc6(struct drm_i915_private *i915)
* on top of the last known real value, as the approximated RC6
* counter value.
*/
- spin_lock_irqsave(&i915->pmu.lock, flags);
+ spin_lock_irqsave(&pmu->lock, flags);
/*
* After the above branch intel_runtime_pm_get_if_in_use failed
@@ -496,25 +502,25 @@ static u64 get_rc6(struct drm_i915_private *i915)
if (pm_runtime_status_suspended(kdev)) {
val = pm_runtime_suspended_time(kdev);
- if (!i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur)
- i915->pmu.suspended_time_last = val;
+ if (!pmu->sample[__I915_SAMPLE_RC6_ESTIMATED].cur)
+ pmu->suspended_time_last = val;
- val -= i915->pmu.suspended_time_last;
- val += i915->pmu.sample[__I915_SAMPLE_RC6].cur;
+ val -= pmu->suspended_time_last;
+ val += pmu->sample[__I915_SAMPLE_RC6].cur;
- i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur = val;
- } else if (i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur) {
- val = i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur;
+ pmu->sample[__I915_SAMPLE_RC6_ESTIMATED].cur = val;
+ } else if (pmu->sample[__I915_SAMPLE_RC6_ESTIMATED].cur) {
+ val = pmu->sample[__I915_SAMPLE_RC6_ESTIMATED].cur;
} else {
- val = i915->pmu.sample[__I915_SAMPLE_RC6].cur;
+ val = pmu->sample[__I915_SAMPLE_RC6].cur;
}
- spin_unlock_irqrestore(&i915->pmu.lock, flags);
+ spin_unlock_irqrestore(&pmu->lock, flags);
}
return val;
#else
- return __get_rc6(i915);
+ return __get_rc6(gt);
#endif
}
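[Editor's note: the suspended-time branch above synthesizes a monotonic RC6 value while the device cannot be woken. A worked sketch of the arithmetic, with invented numbers:

	/* all values in nanoseconds, invented purely for illustration */
	u64 rc6_last  = 10000000000ULL; /* last real reading, taken awake  */
	u64 susp_now  =  7000000000ULL; /* pm_runtime_suspended_time() now */
	u64 susp_base =  5000000000ULL; /* snapshot when estimation began  */
	u64 estimate  = rc6_last + (susp_now - susp_base); /* ~12 s        */
]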
@@ -522,6 +528,7 @@ static u64 __i915_pmu_event_read(struct perf_event *event)
{
struct drm_i915_private *i915 =
container_of(event->pmu, typeof(*i915), pmu.base);
+ struct i915_pmu *pmu = &i915->pmu;
u64 val = 0;
if (is_engine_event(event)) {
@@ -544,19 +551,19 @@ static u64 __i915_pmu_event_read(struct perf_event *event)
switch (event->attr.config) {
case I915_PMU_ACTUAL_FREQUENCY:
val =
- div_u64(i915->pmu.sample[__I915_SAMPLE_FREQ_ACT].cur,
+ div_u64(pmu->sample[__I915_SAMPLE_FREQ_ACT].cur,
USEC_PER_SEC /* to MHz */);
break;
case I915_PMU_REQUESTED_FREQUENCY:
val =
- div_u64(i915->pmu.sample[__I915_SAMPLE_FREQ_REQ].cur,
+ div_u64(pmu->sample[__I915_SAMPLE_FREQ_REQ].cur,
USEC_PER_SEC /* to MHz */);
break;
case I915_PMU_INTERRUPTS:
val = count_interrupts(i915);
break;
case I915_PMU_RC6_RESIDENCY:
- val = get_rc6(i915);
+ val = get_rc6(&i915->gt);
break;
}
}
@@ -584,24 +591,25 @@ static void i915_pmu_enable(struct perf_event *event)
struct drm_i915_private *i915 =
container_of(event->pmu, typeof(*i915), pmu.base);
unsigned int bit = event_enabled_bit(event);
+ struct i915_pmu *pmu = &i915->pmu;
unsigned long flags;
- spin_lock_irqsave(&i915->pmu.lock, flags);
+ spin_lock_irqsave(&pmu->lock, flags);
/*
* Update the bitmask of enabled events and increment
* the event reference counter.
*/
- BUILD_BUG_ON(ARRAY_SIZE(i915->pmu.enable_count) != I915_PMU_MASK_BITS);
- GEM_BUG_ON(bit >= ARRAY_SIZE(i915->pmu.enable_count));
- GEM_BUG_ON(i915->pmu.enable_count[bit] == ~0);
- i915->pmu.enable |= BIT_ULL(bit);
- i915->pmu.enable_count[bit]++;
+ BUILD_BUG_ON(ARRAY_SIZE(pmu->enable_count) != I915_PMU_MASK_BITS);
+ GEM_BUG_ON(bit >= ARRAY_SIZE(pmu->enable_count));
+ GEM_BUG_ON(pmu->enable_count[bit] == ~0);
+ pmu->enable |= BIT_ULL(bit);
+ pmu->enable_count[bit]++;
/*
* Start the sampling timer if needed and not already enabled.
*/
- __i915_pmu_maybe_start_timer(i915);
+ __i915_pmu_maybe_start_timer(pmu);
/*
* For per-engine events the bitmask and reference counting
@@ -627,7 +635,7 @@ static void i915_pmu_enable(struct perf_event *event)
engine->pmu.enable_count[sample]++;
}
- spin_unlock_irqrestore(&i915->pmu.lock, flags);
+ spin_unlock_irqrestore(&pmu->lock, flags);
/*
* Store the current counter value so we can report the correct delta
@@ -642,9 +650,10 @@ static void i915_pmu_disable(struct perf_event *event)
struct drm_i915_private *i915 =
container_of(event->pmu, typeof(*i915), pmu.base);
unsigned int bit = event_enabled_bit(event);
+ struct i915_pmu *pmu = &i915->pmu;
unsigned long flags;
- spin_lock_irqsave(&i915->pmu.lock, flags);
+ spin_lock_irqsave(&pmu->lock, flags);
if (is_engine_event(event)) {
u8 sample = engine_event_sample(event);
@@ -666,18 +675,18 @@ static void i915_pmu_disable(struct perf_event *event)
engine->pmu.enable &= ~BIT(sample);
}
- GEM_BUG_ON(bit >= ARRAY_SIZE(i915->pmu.enable_count));
- GEM_BUG_ON(i915->pmu.enable_count[bit] == 0);
+ GEM_BUG_ON(bit >= ARRAY_SIZE(pmu->enable_count));
+ GEM_BUG_ON(pmu->enable_count[bit] == 0);
/*
* Decrement the reference count and clear the enabled
* bitmask when the last listener on an event goes away.
*/
- if (--i915->pmu.enable_count[bit] == 0) {
- i915->pmu.enable &= ~BIT_ULL(bit);
- i915->pmu.timer_enabled &= pmu_needs_timer(i915, true);
+ if (--pmu->enable_count[bit] == 0) {
+ pmu->enable &= ~BIT_ULL(bit);
+ pmu->timer_enabled &= pmu_needs_timer(pmu, true);
}
- spin_unlock_irqrestore(&i915->pmu.lock, flags);
+ spin_unlock_irqrestore(&pmu->lock, flags);
}
static void i915_pmu_event_start(struct perf_event *event, int flags)
@@ -826,8 +835,9 @@ add_pmu_attr(struct perf_pmu_events_attr *attr, const char *name,
}
static struct attribute **
-create_event_attributes(struct drm_i915_private *i915)
+create_event_attributes(struct i915_pmu *pmu)
{
+ struct drm_i915_private *i915 = container_of(pmu, typeof(*i915), pmu);
static const struct {
u64 config;
const char *name;
@@ -851,7 +861,6 @@ create_event_attributes(struct drm_i915_private *i915)
struct i915_ext_attribute *i915_attr = NULL, *i915_iter;
struct attribute **attr = NULL, **attr_iter;
struct intel_engine_cs *engine;
- enum intel_engine_id id;
unsigned int i;
/* Count how many counters we will be exposing. */
@@ -860,7 +869,7 @@ create_event_attributes(struct drm_i915_private *i915)
count++;
}
- for_each_engine(engine, i915, id) {
+ for_each_uabi_engine(engine, i915) {
for (i = 0; i < ARRAY_SIZE(engine_events); i++) {
if (!engine_event_status(engine,
engine_events[i].sample))
@@ -911,7 +920,7 @@ create_event_attributes(struct drm_i915_private *i915)
}
/* Initialize supported engine counters. */
- for_each_engine(engine, i915, id) {
+ for_each_uabi_engine(engine, i915) {
for (i = 0; i < ARRAY_SIZE(engine_events); i++) {
char *str;
@@ -928,7 +937,7 @@ create_event_attributes(struct drm_i915_private *i915)
i915_iter =
add_i915_attr(i915_iter, str,
__I915_PMU_ENGINE(engine->uabi_class,
- engine->instance,
+ engine->uabi_instance,
engine_events[i].sample));
str = kasprintf(GFP_KERNEL, "%s-%s.unit",
@@ -941,8 +950,8 @@ create_event_attributes(struct drm_i915_private *i915)
}
}
- i915->pmu.i915_attr = i915_attr;
- i915->pmu.pmu_attr = pmu_attr;
+ pmu->i915_attr = i915_attr;
+ pmu->pmu_attr = pmu_attr;
return attr;
@@ -958,7 +967,7 @@ err_alloc:
return NULL;
}
-static void free_event_attributes(struct drm_i915_private *i915)
+static void free_event_attributes(struct i915_pmu *pmu)
{
struct attribute **attr_iter = i915_pmu_events_attr_group.attrs;
@@ -966,12 +975,12 @@ static void free_event_attributes(struct drm_i915_private *i915)
kfree((*attr_iter)->name);
kfree(i915_pmu_events_attr_group.attrs);
- kfree(i915->pmu.i915_attr);
- kfree(i915->pmu.pmu_attr);
+ kfree(pmu->i915_attr);
+ kfree(pmu->pmu_attr);
i915_pmu_events_attr_group.attrs = NULL;
- i915->pmu.i915_attr = NULL;
- i915->pmu.pmu_attr = NULL;
+ pmu->i915_attr = NULL;
+ pmu->pmu_attr = NULL;
}
static int i915_pmu_cpu_online(unsigned int cpu, struct hlist_node *node)
@@ -1008,7 +1017,7 @@ static int i915_pmu_cpu_offline(unsigned int cpu, struct hlist_node *node)
static enum cpuhp_state cpuhp_slot = CPUHP_INVALID;
-static int i915_pmu_register_cpuhp_state(struct drm_i915_private *i915)
+static int i915_pmu_register_cpuhp_state(struct i915_pmu *pmu)
{
enum cpuhp_state slot;
int ret;
@@ -1021,7 +1030,7 @@ static int i915_pmu_register_cpuhp_state(struct drm_i915_private *i915)
return ret;
slot = ret;
- ret = cpuhp_state_add_instance(slot, &i915->pmu.node);
+ ret = cpuhp_state_add_instance(slot, &pmu->node);
if (ret) {
cpuhp_remove_multi_state(slot);
return ret;
@@ -1031,72 +1040,75 @@ static int i915_pmu_register_cpuhp_state(struct drm_i915_private *i915)
return 0;
}
-static void i915_pmu_unregister_cpuhp_state(struct drm_i915_private *i915)
+static void i915_pmu_unregister_cpuhp_state(struct i915_pmu *pmu)
{
WARN_ON(cpuhp_slot == CPUHP_INVALID);
- WARN_ON(cpuhp_state_remove_instance(cpuhp_slot, &i915->pmu.node));
+ WARN_ON(cpuhp_state_remove_instance(cpuhp_slot, &pmu->node));
cpuhp_remove_multi_state(cpuhp_slot);
}
void i915_pmu_register(struct drm_i915_private *i915)
{
+ struct i915_pmu *pmu = &i915->pmu;
int ret;
if (INTEL_GEN(i915) <= 2) {
- DRM_INFO("PMU not supported for this GPU.");
+ dev_info(i915->drm.dev, "PMU not supported for this GPU.");
return;
}
- i915_pmu_events_attr_group.attrs = create_event_attributes(i915);
+ i915_pmu_events_attr_group.attrs = create_event_attributes(pmu);
if (!i915_pmu_events_attr_group.attrs) {
ret = -ENOMEM;
goto err;
}
- i915->pmu.base.attr_groups = i915_pmu_attr_groups;
- i915->pmu.base.task_ctx_nr = perf_invalid_context;
- i915->pmu.base.event_init = i915_pmu_event_init;
- i915->pmu.base.add = i915_pmu_event_add;
- i915->pmu.base.del = i915_pmu_event_del;
- i915->pmu.base.start = i915_pmu_event_start;
- i915->pmu.base.stop = i915_pmu_event_stop;
- i915->pmu.base.read = i915_pmu_event_read;
- i915->pmu.base.event_idx = i915_pmu_event_event_idx;
-
- spin_lock_init(&i915->pmu.lock);
- hrtimer_init(&i915->pmu.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- i915->pmu.timer.function = i915_sample;
-
- ret = perf_pmu_register(&i915->pmu.base, "i915", -1);
+ pmu->base.attr_groups = i915_pmu_attr_groups;
+ pmu->base.task_ctx_nr = perf_invalid_context;
+ pmu->base.event_init = i915_pmu_event_init;
+ pmu->base.add = i915_pmu_event_add;
+ pmu->base.del = i915_pmu_event_del;
+ pmu->base.start = i915_pmu_event_start;
+ pmu->base.stop = i915_pmu_event_stop;
+ pmu->base.read = i915_pmu_event_read;
+ pmu->base.event_idx = i915_pmu_event_event_idx;
+
+ spin_lock_init(&pmu->lock);
+ hrtimer_init(&pmu->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ pmu->timer.function = i915_sample;
+
+ ret = perf_pmu_register(&pmu->base, "i915", -1);
if (ret)
goto err;
- ret = i915_pmu_register_cpuhp_state(i915);
+ ret = i915_pmu_register_cpuhp_state(pmu);
if (ret)
goto err_unreg;
return;
err_unreg:
- perf_pmu_unregister(&i915->pmu.base);
+ perf_pmu_unregister(&pmu->base);
err:
- i915->pmu.base.event_init = NULL;
- free_event_attributes(i915);
+ pmu->base.event_init = NULL;
+ free_event_attributes(pmu);
DRM_NOTE("Failed to register PMU! (err=%d)\n", ret);
}
void i915_pmu_unregister(struct drm_i915_private *i915)
{
- if (!i915->pmu.base.event_init)
+ struct i915_pmu *pmu = &i915->pmu;
+
+ if (!pmu->base.event_init)
return;
- WARN_ON(i915->pmu.enable);
+ WARN_ON(pmu->enable);
- hrtimer_cancel(&i915->pmu.timer);
+ hrtimer_cancel(&pmu->timer);
- i915_pmu_unregister_cpuhp_state(i915);
+ i915_pmu_unregister_cpuhp_state(pmu);
- perf_pmu_unregister(&i915->pmu.base);
- i915->pmu.base.event_init = NULL;
- free_event_attributes(i915);
+ perf_pmu_unregister(&pmu->base);
+ pmu->base.event_init = NULL;
+ free_event_attributes(pmu);
}
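[Editor's note: with the PMU registered under the name "i915", its counters are reachable through the ordinary perf interface. A hedged user-space sketch; the sysfs paths follow the standard dynamic-PMU layout and nothing here is taken from the patch itself:

	#include <linux/perf_event.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>
	#include <sys/syscall.h>
	#include <unistd.h>

	int main(void)
	{
		unsigned long long config;
		int type;
		FILE *f;

		/* dynamic PMUs publish their type id and event encodings in sysfs */
		f = fopen("/sys/bus/event_source/devices/i915/type", "r");
		if (!f || fscanf(f, "%d", &type) != 1)
			return 1;
		fclose(f);

		f = fopen("/sys/bus/event_source/devices/i915/events/interrupts", "r");
		if (!f || fscanf(f, "config=%llx", &config) != 1)
			return 1;
		fclose(f);

		struct perf_event_attr attr;
		memset(&attr, 0, sizeof(attr));
		attr.type = type;
		attr.size = sizeof(attr);
		attr.config = config;

		/* i915 events are system-wide: pid == -1 and an explicit cpu */
		int fd = syscall(__NR_perf_event_open, &attr, -1, 0, -1, 0);
		if (fd < 0)
			return 1;

		uint64_t count;
		sleep(1);
		if (read(fd, &count, sizeof(count)) == sizeof(count))
			printf("i915 interrupts: %llu\n", (unsigned long long)count);
		close(fd);
		return 0;
	}
]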
diff --git a/drivers/gpu/drm/i915/i915_priolist_types.h b/drivers/gpu/drm/i915/i915_priolist_types.h
index 49709de69875..21037a2e2038 100644
--- a/drivers/gpu/drm/i915/i915_priolist_types.h
+++ b/drivers/gpu/drm/i915/i915_priolist_types.h
@@ -16,8 +16,6 @@ enum {
I915_PRIORITY_MIN = I915_CONTEXT_MIN_USER_PRIORITY - 1,
I915_PRIORITY_NORMAL = I915_CONTEXT_DEFAULT_PRIORITY,
I915_PRIORITY_MAX = I915_CONTEXT_MAX_USER_PRIORITY + 1,
-
- I915_PRIORITY_INVALID = INT_MIN
};
#define I915_USER_PRIORITY_SHIFT 2
@@ -29,6 +27,19 @@ enum {
#define I915_PRIORITY_WAIT ((u8)BIT(0))
#define I915_PRIORITY_NOSEMAPHORE ((u8)BIT(1))
+/* Smallest priority value that cannot be bumped. */
+#define I915_PRIORITY_INVALID (INT_MIN | (u8)I915_PRIORITY_MASK)
+
+/*
+ * Requests containing performance queries must not be preempted by
+ * another context. They get scheduled with their default priority and
+ * once they reach the execlist ports we ensure that they stick on the
+ * HW until finished by pretending that they have maximum priority,
+ * i.e. nothing can have higher priority and force us to usurp the
+ * active request.
+ */
+#define I915_PRIORITY_UNPREEMPTABLE INT_MAX
+
#define __NO_PREEMPTION (I915_PRIORITY_WAIT)
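[Editor's note: a toy illustration of why INT_MAX works as the sentinel here — not driver code. Preemption decisions reduce to a strict greater-than on priority, and nothing compares greater than INT_MAX:

	/* illustrative only: with the active request pinned to
	 * I915_PRIORITY_UNPREEMPTABLE (INT_MAX), no pending priority is
	 * strictly greater, so this check can never trigger preemption */
	static bool need_preempt(int active_prio, int pending_prio)
	{
		return pending_prio > active_prio;
	}
]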
struct i915_priolist {
diff --git a/drivers/gpu/drm/i915/i915_pvinfo.h b/drivers/gpu/drm/i915/i915_pvinfo.h
index 969e514916ab..683e97ac2430 100644
--- a/drivers/gpu/drm/i915/i915_pvinfo.h
+++ b/drivers/gpu/drm/i915/i915_pvinfo.h
@@ -24,6 +24,8 @@
#ifndef _I915_PVINFO_H_
#define _I915_PVINFO_H_
+#include <linux/types.h>
+
/* The MMIO offset of the shared info between guest and host emulator */
#define VGT_PVINFO_PAGE 0x78000
#define VGT_PVINFO_SIZE 0x1000
@@ -110,8 +112,9 @@ struct vgt_if {
u32 rsv7[0x200 - 24]; /* pad to one page */
} __packed;
-#define vgtif_reg(x) \
- _MMIO((VGT_PVINFO_PAGE + offsetof(struct vgt_if, x)))
+#define vgtif_offset(x) (offsetof(struct vgt_if, x))
+
+#define vgtif_reg(x) _MMIO(VGT_PVINFO_PAGE + vgtif_offset(x))
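[Editor's note: splitting vgtif_offset() out of vgtif_reg() lets callers use the raw byte offset (e.g. for reads through a mapped PVINFO page) as well as the MMIO handle. A hedged usage sketch; the member name is an assumption based on the VGT_DRV_DISPLAY_* values below:

	u32 off = vgtif_offset(display_ready);     /* byte offset within vgt_if */
	i915_reg_t reg = vgtif_reg(display_ready); /* full MMIO register handle */
]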
/* vGPU display status to be used by the host side */
#define VGT_DRV_DISPLAY_NOT_READY 0
diff --git a/drivers/gpu/drm/i915/i915_query.c b/drivers/gpu/drm/i915/i915_query.c
index 7b7016171057..ad9240a0817a 100644
--- a/drivers/gpu/drm/i915/i915_query.c
+++ b/drivers/gpu/drm/i915/i915_query.c
@@ -105,7 +105,6 @@ query_engine_info(struct drm_i915_private *i915,
struct drm_i915_query_engine_info query;
struct drm_i915_engine_info info = { };
struct intel_engine_cs *engine;
- enum intel_engine_id id;
int len, ret;
if (query_item->flags)
@@ -125,9 +124,9 @@ query_engine_info(struct drm_i915_private *i915,
info_ptr = &query_ptr->engines[0];
- for_each_engine(engine, i915, id) {
+ for_each_uabi_engine(engine, i915) {
info.engine.engine_class = engine->uabi_class;
- info.engine.engine_instance = engine->instance;
+ info.engine.engine_instance = engine->uabi_instance;
info.capabilities = engine->uabi_capabilities;
if (__copy_to_user(info_ptr, &info, sizeof(info)))
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index d6483b5dc8e5..2abd199093c5 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -242,6 +242,7 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define _MMIO_PIPE3(pipe, a, b, c) _MMIO(_PICK(pipe, a, b, c))
#define _MMIO_PORT3(pipe, a, b, c) _MMIO(_PICK(pipe, a, b, c))
#define _MMIO_PHY3(phy, a, b, c) _MMIO(_PHY3(phy, a, b, c))
+#define _MMIO_PLL3(pll, a, b, c) _MMIO(_PICK(pll, a, b, c))
/*
* Device info offset array based helpers for groups of registers with unevenly
@@ -250,9 +251,10 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define _MMIO_PIPE2(pipe, reg) _MMIO(INTEL_INFO(dev_priv)->pipe_offsets[pipe] - \
INTEL_INFO(dev_priv)->pipe_offsets[PIPE_A] + (reg) + \
DISPLAY_MMIO_BASE(dev_priv))
-#define _MMIO_TRANS2(pipe, reg) _MMIO(INTEL_INFO(dev_priv)->trans_offsets[(pipe)] - \
- INTEL_INFO(dev_priv)->trans_offsets[TRANSCODER_A] + (reg) + \
- DISPLAY_MMIO_BASE(dev_priv))
+#define _TRANS2(tran, reg) (INTEL_INFO(dev_priv)->trans_offsets[(tran)] - \
+ INTEL_INFO(dev_priv)->trans_offsets[TRANSCODER_A] + (reg) + \
+ DISPLAY_MMIO_BASE(dev_priv))
+#define _MMIO_TRANS2(tran, reg) _MMIO(_TRANS2(tran, reg))
#define _CURSOR2(pipe, reg) _MMIO(INTEL_INFO(dev_priv)->cursor_offsets[(pipe)] - \
INTEL_INFO(dev_priv)->cursor_offsets[PIPE_A] + (reg) + \
DISPLAY_MMIO_BASE(dev_priv))
@@ -270,30 +272,6 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define _MASKED_BIT_ENABLE(a) ({ typeof(a) _a = (a); _MASKED_FIELD(_a, _a); })
#define _MASKED_BIT_DISABLE(a) (_MASKED_FIELD((a), 0))
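[Editor's note: the _MASKED_* helpers above target registers whose upper 16 bits are a per-bit write-enable mask, so individual bits can be changed without a read-modify-write. A standalone sketch of the encoding, assuming _MASKED_FIELD keeps its usual (mask << 16) | value shape from earlier in this header:

	#define MASKED_FIELD(mask, value)	(((mask) << 16) | (value))
	#define MASKED_BIT_ENABLE(a)		MASKED_FIELD((a), (a))
	#define MASKED_BIT_DISABLE(a)		MASKED_FIELD((a), 0)

	/* e.g. MASKED_BIT_ENABLE(1 << 3) == 0x00080008: the write touches
	 * only bit 3 of the register and leaves every other bit alone */
]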
-/* Engine ID */
-
-#define RCS0_HW 0
-#define VCS0_HW 1
-#define BCS0_HW 2
-#define VECS0_HW 3
-#define VCS1_HW 4
-#define VCS2_HW 6
-#define VCS3_HW 7
-#define VECS1_HW 12
-
-/* Engine class */
-
-#define RENDER_CLASS 0
-#define VIDEO_DECODE_CLASS 1
-#define VIDEO_ENHANCEMENT_CLASS 2
-#define COPY_ENGINE_CLASS 3
-#define OTHER_CLASS 4
-#define MAX_ENGINE_CLASS 4
-
-#define OTHER_GUC_INSTANCE 0
-#define OTHER_GTPM_INSTANCE 1
-#define MAX_ENGINE_INSTANCE 3
-
/* PCI config space */
#define MCHBAR_I915 0x44
@@ -1161,27 +1139,6 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define PUNIT_REG_ISPSSPM0 0x39
#define PUNIT_REG_ISPSSPM1 0x3a
-/*
- * i915_power_well_id:
- *
- * IDs used to look up power wells. Power wells accessed directly bypassing
- * the power domains framework must be assigned a unique ID. The rest of power
- * wells must be assigned DISP_PW_ID_NONE.
- */
-enum i915_power_well_id {
- DISP_PW_ID_NONE,
-
- VLV_DISP_PW_DISP2D,
- BXT_DISP_PW_DPIO_CMN_A,
- VLV_DISP_PW_DPIO_CMN_BC,
- GLK_DISP_PW_DPIO_CMN_C,
- CHV_DISP_PW_DPIO_CMN_D,
- HSW_DISP_PW_GLOBAL,
- SKL_DISP_PW_MISC_IO,
- SKL_DISP_PW_1,
- SKL_DISP_PW_2,
-};
-
#define PUNIT_REG_PWRGT_CTRL 0x60
#define PUNIT_REG_PWRGT_STATUS 0x61
#define PUNIT_PWRGT_MASK(pw_idx) (3 << ((pw_idx) * 2))
@@ -1793,19 +1750,21 @@ enum i915_power_well_id {
*/
#define _ICL_COMBOPHY_A 0x162000
#define _ICL_COMBOPHY_B 0x6C000
-#define _ICL_COMBOPHY(port) _PICK(port, _ICL_COMBOPHY_A, \
- _ICL_COMBOPHY_B)
+#define _EHL_COMBOPHY_C 0x160000
+#define _ICL_COMBOPHY(phy) _PICK(phy, _ICL_COMBOPHY_A, \
+ _ICL_COMBOPHY_B, \
+ _EHL_COMBOPHY_C)
/* CNL/ICL Port CL_DW registers */
-#define _ICL_PORT_CL_DW(dw, port) (_ICL_COMBOPHY(port) + \
+#define _ICL_PORT_CL_DW(dw, phy) (_ICL_COMBOPHY(phy) + \
4 * (dw))
#define CNL_PORT_CL1CM_DW5 _MMIO(0x162014)
-#define ICL_PORT_CL_DW5(port) _MMIO(_ICL_PORT_CL_DW(5, port))
+#define ICL_PORT_CL_DW5(phy) _MMIO(_ICL_PORT_CL_DW(5, phy))
#define CL_POWER_DOWN_ENABLE (1 << 4)
#define SUS_CLOCK_CONFIG (3 << 0)
-#define ICL_PORT_CL_DW10(port) _MMIO(_ICL_PORT_CL_DW(10, port))
+#define ICL_PORT_CL_DW10(phy) _MMIO(_ICL_PORT_CL_DW(10, phy))
#define PG_SEQ_DELAY_OVERRIDE_MASK (3 << 25)
#define PG_SEQ_DELAY_OVERRIDE_SHIFT 25
#define PG_SEQ_DELAY_OVERRIDE_ENABLE (1 << 24)
@@ -1820,23 +1779,23 @@ enum i915_power_well_id {
#define PWR_DOWN_LN_MASK (0xf << 4)
#define PWR_DOWN_LN_SHIFT 4
-#define ICL_PORT_CL_DW12(port) _MMIO(_ICL_PORT_CL_DW(12, port))
+#define ICL_PORT_CL_DW12(phy) _MMIO(_ICL_PORT_CL_DW(12, phy))
#define ICL_LANE_ENABLE_AUX (1 << 0)
/* CNL/ICL Port COMP_DW registers */
#define _ICL_PORT_COMP 0x100
-#define _ICL_PORT_COMP_DW(dw, port) (_ICL_COMBOPHY(port) + \
+#define _ICL_PORT_COMP_DW(dw, phy) (_ICL_COMBOPHY(phy) + \
_ICL_PORT_COMP + 4 * (dw))
#define CNL_PORT_COMP_DW0 _MMIO(0x162100)
-#define ICL_PORT_COMP_DW0(port) _MMIO(_ICL_PORT_COMP_DW(0, port))
+#define ICL_PORT_COMP_DW0(phy) _MMIO(_ICL_PORT_COMP_DW(0, phy))
#define COMP_INIT (1 << 31)
#define CNL_PORT_COMP_DW1 _MMIO(0x162104)
-#define ICL_PORT_COMP_DW1(port) _MMIO(_ICL_PORT_COMP_DW(1, port))
+#define ICL_PORT_COMP_DW1(phy) _MMIO(_ICL_PORT_COMP_DW(1, phy))
#define CNL_PORT_COMP_DW3 _MMIO(0x16210c)
-#define ICL_PORT_COMP_DW3(port) _MMIO(_ICL_PORT_COMP_DW(3, port))
+#define ICL_PORT_COMP_DW3(phy) _MMIO(_ICL_PORT_COMP_DW(3, phy))
#define PROCESS_INFO_DOT_0 (0 << 26)
#define PROCESS_INFO_DOT_1 (1 << 26)
#define PROCESS_INFO_DOT_4 (2 << 26)
@@ -1848,14 +1807,14 @@ enum i915_power_well_id {
#define VOLTAGE_INFO_MASK (3 << 24)
#define VOLTAGE_INFO_SHIFT 24
-#define ICL_PORT_COMP_DW8(port) _MMIO(_ICL_PORT_COMP_DW(8, port))
+#define ICL_PORT_COMP_DW8(phy) _MMIO(_ICL_PORT_COMP_DW(8, phy))
#define IREFGEN (1 << 24)
#define CNL_PORT_COMP_DW9 _MMIO(0x162124)
-#define ICL_PORT_COMP_DW9(port) _MMIO(_ICL_PORT_COMP_DW(9, port))
+#define ICL_PORT_COMP_DW9(phy) _MMIO(_ICL_PORT_COMP_DW(9, phy))
#define CNL_PORT_COMP_DW10 _MMIO(0x162128)
-#define ICL_PORT_COMP_DW10(port) _MMIO(_ICL_PORT_COMP_DW(10, port))
+#define ICL_PORT_COMP_DW10(phy) _MMIO(_ICL_PORT_COMP_DW(10, phy))
/* CNL/ICL Port PCS registers */
#define _CNL_PORT_PCS_DW1_GRP_AE 0x162304
@@ -1868,14 +1827,14 @@ enum i915_power_well_id {
#define _CNL_PORT_PCS_DW1_LN0_C 0x162C04
#define _CNL_PORT_PCS_DW1_LN0_D 0x162E04
#define _CNL_PORT_PCS_DW1_LN0_F 0x162804
-#define CNL_PORT_PCS_DW1_GRP(port) _MMIO(_PICK(port, \
+#define CNL_PORT_PCS_DW1_GRP(phy) _MMIO(_PICK(phy, \
_CNL_PORT_PCS_DW1_GRP_AE, \
_CNL_PORT_PCS_DW1_GRP_B, \
_CNL_PORT_PCS_DW1_GRP_C, \
_CNL_PORT_PCS_DW1_GRP_D, \
_CNL_PORT_PCS_DW1_GRP_AE, \
_CNL_PORT_PCS_DW1_GRP_F))
-#define CNL_PORT_PCS_DW1_LN0(port) _MMIO(_PICK(port, \
+#define CNL_PORT_PCS_DW1_LN0(phy) _MMIO(_PICK(phy, \
_CNL_PORT_PCS_DW1_LN0_AE, \
_CNL_PORT_PCS_DW1_LN0_B, \
_CNL_PORT_PCS_DW1_LN0_C, \
@@ -1886,16 +1845,18 @@ enum i915_power_well_id {
#define _ICL_PORT_PCS_AUX 0x300
#define _ICL_PORT_PCS_GRP 0x600
#define _ICL_PORT_PCS_LN(ln) (0x800 + (ln) * 0x100)
-#define _ICL_PORT_PCS_DW_AUX(dw, port) (_ICL_COMBOPHY(port) + \
+#define _ICL_PORT_PCS_DW_AUX(dw, phy) (_ICL_COMBOPHY(phy) + \
_ICL_PORT_PCS_AUX + 4 * (dw))
-#define _ICL_PORT_PCS_DW_GRP(dw, port) (_ICL_COMBOPHY(port) + \
+#define _ICL_PORT_PCS_DW_GRP(dw, phy) (_ICL_COMBOPHY(phy) + \
_ICL_PORT_PCS_GRP + 4 * (dw))
-#define _ICL_PORT_PCS_DW_LN(dw, ln, port) (_ICL_COMBOPHY(port) + \
+#define _ICL_PORT_PCS_DW_LN(dw, ln, phy) (_ICL_COMBOPHY(phy) + \
_ICL_PORT_PCS_LN(ln) + 4 * (dw))
-#define ICL_PORT_PCS_DW1_AUX(port) _MMIO(_ICL_PORT_PCS_DW_AUX(1, port))
-#define ICL_PORT_PCS_DW1_GRP(port) _MMIO(_ICL_PORT_PCS_DW_GRP(1, port))
-#define ICL_PORT_PCS_DW1_LN0(port) _MMIO(_ICL_PORT_PCS_DW_LN(1, 0, port))
+#define ICL_PORT_PCS_DW1_AUX(phy) _MMIO(_ICL_PORT_PCS_DW_AUX(1, phy))
+#define ICL_PORT_PCS_DW1_GRP(phy) _MMIO(_ICL_PORT_PCS_DW_GRP(1, phy))
+#define ICL_PORT_PCS_DW1_LN0(phy) _MMIO(_ICL_PORT_PCS_DW_LN(1, 0, phy))
#define COMMON_KEEPER_EN (1 << 26)
+#define LATENCY_OPTIM_MASK (0x3 << 2)
+#define LATENCY_OPTIM_VAL(x) ((x) << 2)
/* CNL/ICL Port TX registers */
#define _CNL_PORT_TX_AE_GRP_OFFSET 0x162340
@@ -1929,18 +1890,18 @@ enum i915_power_well_id {
#define _ICL_PORT_TX_GRP 0x680
#define _ICL_PORT_TX_LN(ln) (0x880 + (ln) * 0x100)
-#define _ICL_PORT_TX_DW_AUX(dw, port) (_ICL_COMBOPHY(port) + \
+#define _ICL_PORT_TX_DW_AUX(dw, phy) (_ICL_COMBOPHY(phy) + \
_ICL_PORT_TX_AUX + 4 * (dw))
-#define _ICL_PORT_TX_DW_GRP(dw, port) (_ICL_COMBOPHY(port) + \
+#define _ICL_PORT_TX_DW_GRP(dw, phy) (_ICL_COMBOPHY(phy) + \
_ICL_PORT_TX_GRP + 4 * (dw))
-#define _ICL_PORT_TX_DW_LN(dw, ln, port) (_ICL_COMBOPHY(port) + \
+#define _ICL_PORT_TX_DW_LN(dw, ln, phy) (_ICL_COMBOPHY(phy) + \
_ICL_PORT_TX_LN(ln) + 4 * (dw))
#define CNL_PORT_TX_DW2_GRP(port) _MMIO(_CNL_PORT_TX_DW_GRP(2, port))
#define CNL_PORT_TX_DW2_LN0(port) _MMIO(_CNL_PORT_TX_DW_LN0(2, port))
-#define ICL_PORT_TX_DW2_AUX(port) _MMIO(_ICL_PORT_TX_DW_AUX(2, port))
-#define ICL_PORT_TX_DW2_GRP(port) _MMIO(_ICL_PORT_TX_DW_GRP(2, port))
-#define ICL_PORT_TX_DW2_LN0(port) _MMIO(_ICL_PORT_TX_DW_LN(2, 0, port))
+#define ICL_PORT_TX_DW2_AUX(phy) _MMIO(_ICL_PORT_TX_DW_AUX(2, phy))
+#define ICL_PORT_TX_DW2_GRP(phy) _MMIO(_ICL_PORT_TX_DW_GRP(2, phy))
+#define ICL_PORT_TX_DW2_LN0(phy) _MMIO(_ICL_PORT_TX_DW_LN(2, 0, phy))
#define SWING_SEL_UPPER(x) (((x) >> 3) << 15)
#define SWING_SEL_UPPER_MASK (1 << 15)
#define SWING_SEL_LOWER(x) (((x) & 0x7) << 11)
@@ -1957,10 +1918,10 @@ enum i915_power_well_id {
#define CNL_PORT_TX_DW4_LN(ln, port) _MMIO(_CNL_PORT_TX_DW_LN0(4, (port)) + \
((ln) * (_CNL_PORT_TX_DW4_LN1_AE - \
_CNL_PORT_TX_DW4_LN0_AE)))
-#define ICL_PORT_TX_DW4_AUX(port) _MMIO(_ICL_PORT_TX_DW_AUX(4, port))
-#define ICL_PORT_TX_DW4_GRP(port) _MMIO(_ICL_PORT_TX_DW_GRP(4, port))
-#define ICL_PORT_TX_DW4_LN0(port) _MMIO(_ICL_PORT_TX_DW_LN(4, 0, port))
-#define ICL_PORT_TX_DW4_LN(ln, port) _MMIO(_ICL_PORT_TX_DW_LN(4, ln, port))
+#define ICL_PORT_TX_DW4_AUX(phy) _MMIO(_ICL_PORT_TX_DW_AUX(4, phy))
+#define ICL_PORT_TX_DW4_GRP(phy) _MMIO(_ICL_PORT_TX_DW_GRP(4, phy))
+#define ICL_PORT_TX_DW4_LN0(phy) _MMIO(_ICL_PORT_TX_DW_LN(4, 0, phy))
+#define ICL_PORT_TX_DW4_LN(ln, phy) _MMIO(_ICL_PORT_TX_DW_LN(4, ln, phy))
#define LOADGEN_SELECT (1 << 31)
#define POST_CURSOR_1(x) ((x) << 12)
#define POST_CURSOR_1_MASK (0x3F << 12)
@@ -1971,9 +1932,9 @@ enum i915_power_well_id {
#define CNL_PORT_TX_DW5_GRP(port) _MMIO(_CNL_PORT_TX_DW_GRP(5, port))
#define CNL_PORT_TX_DW5_LN0(port) _MMIO(_CNL_PORT_TX_DW_LN0(5, port))
-#define ICL_PORT_TX_DW5_AUX(port) _MMIO(_ICL_PORT_TX_DW_AUX(5, port))
-#define ICL_PORT_TX_DW5_GRP(port) _MMIO(_ICL_PORT_TX_DW_GRP(5, port))
-#define ICL_PORT_TX_DW5_LN0(port) _MMIO(_ICL_PORT_TX_DW_LN(5, 0, port))
+#define ICL_PORT_TX_DW5_AUX(phy) _MMIO(_ICL_PORT_TX_DW_AUX(5, phy))
+#define ICL_PORT_TX_DW5_GRP(phy) _MMIO(_ICL_PORT_TX_DW_GRP(5, phy))
+#define ICL_PORT_TX_DW5_LN0(phy) _MMIO(_ICL_PORT_TX_DW_LN(5, 0, phy))
#define TX_TRAINING_EN (1 << 31)
#define TAP2_DISABLE (1 << 30)
#define TAP3_DISABLE (1 << 29)
@@ -1984,13 +1945,17 @@ enum i915_power_well_id {
#define CNL_PORT_TX_DW7_GRP(port) _MMIO(_CNL_PORT_TX_DW_GRP(7, (port)))
#define CNL_PORT_TX_DW7_LN0(port) _MMIO(_CNL_PORT_TX_DW_LN0(7, (port)))
-#define ICL_PORT_TX_DW7_AUX(port) _MMIO(_ICL_PORT_TX_DW_AUX(7, port))
-#define ICL_PORT_TX_DW7_GRP(port) _MMIO(_ICL_PORT_TX_DW_GRP(7, port))
-#define ICL_PORT_TX_DW7_LN0(port) _MMIO(_ICL_PORT_TX_DW_LN(7, 0, port))
-#define ICL_PORT_TX_DW7_LN(ln, port) _MMIO(_ICL_PORT_TX_DW_LN(7, ln, port))
+#define ICL_PORT_TX_DW7_AUX(phy) _MMIO(_ICL_PORT_TX_DW_AUX(7, phy))
+#define ICL_PORT_TX_DW7_GRP(phy) _MMIO(_ICL_PORT_TX_DW_GRP(7, phy))
+#define ICL_PORT_TX_DW7_LN0(phy) _MMIO(_ICL_PORT_TX_DW_LN(7, 0, phy))
+#define ICL_PORT_TX_DW7_LN(ln, phy) _MMIO(_ICL_PORT_TX_DW_LN(7, ln, phy))
#define N_SCALAR(x) ((x) << 24)
#define N_SCALAR_MASK (0x7F << 24)
+#define _ICL_DPHY_CHKN_REG 0x194
+#define ICL_DPHY_CHKN(port) _MMIO(_ICL_COMBOPHY(port) + _ICL_DPHY_CHKN_REG)
+#define ICL_DPHY_CHKN_AFE_OVER_PPI_STRAP REG_BIT(7)
+
#define MG_PHY_PORT_LN(ln, port, ln0p1, ln0p2, ln1p1) \
_MMIO(_PORT((port) - PORT_C, ln0p1, ln0p2) + (ln) * ((ln1p1) - (ln0p1)))
@@ -2195,9 +2160,13 @@ enum i915_power_well_id {
#define DW6_OLDO_DYN_PWR_DOWN_EN (1 << 28)
#define FIA1_BASE 0x163000
+#define FIA2_BASE 0x16E000
+#define FIA3_BASE 0x16F000
+#define _FIA(fia) _PICK((fia), FIA1_BASE, FIA2_BASE, FIA3_BASE)
+#define _MMIO_FIA(fia, off) _MMIO(_FIA(fia) + (off))
/* ICL PHY DFLEX registers */
-#define PORT_TX_DFLEXDPMLE1 _MMIO(FIA1_BASE + 0x008C0)
+#define PORT_TX_DFLEXDPMLE1(fia) _MMIO_FIA((fia), 0x008C0)
#define DFLEXDPMLE1_DPMLETC_MASK(tc_port) (0xf << (4 * (tc_port)))
#define DFLEXDPMLE1_DPMLETC_ML0(tc_port) (1 << (4 * (tc_port)))
#define DFLEXDPMLE1_DPMLETC_ML1_0(tc_port) (3 << (4 * (tc_port)))
@@ -2477,6 +2446,7 @@ enum i915_power_well_id {
#define RENDER_HWS_PGA_GEN7 _MMIO(0x04080)
#define RING_FAULT_REG(engine) _MMIO(0x4094 + 0x100 * (engine)->hw_id)
#define GEN8_RING_FAULT_REG _MMIO(0x4094)
+#define GEN12_RING_FAULT_REG _MMIO(0xcec4)
#define GEN8_RING_FAULT_ENGINE_ID(x) (((x) >> 12) & 0x7)
#define RING_FAULT_GTTSEL_MASK (1 << 11)
#define RING_FAULT_SRCID(x) (((x) >> 3) & 0xff)
@@ -2486,6 +2456,7 @@ enum i915_power_well_id {
#define GEN8_PRIVATE_PAT_LO _MMIO(0x40e0)
#define GEN8_PRIVATE_PAT_HI _MMIO(0x40e0 + 4)
#define GEN10_PAT_INDEX(index) _MMIO(0x40e0 + (index) * 4)
+#define GEN12_PAT_INDEX(index) _MMIO(0x4800 + (index) * 4)
#define BSD_HWS_PGA_GEN7 _MMIO(0x04180)
#define BLT_HWS_PGA_GEN7 _MMIO(0x04280)
#define VEBOX_HWS_PGA_GEN7 _MMIO(0x04380)
@@ -2513,13 +2484,19 @@ enum i915_power_well_id {
#define RING_WAIT_SEMAPHORE (1 << 10) /* gen6+ */
#define RING_FORCE_TO_NONPRIV(base, i) _MMIO(((base) + 0x4D0) + (i) * 4)
-#define RING_FORCE_TO_NONPRIV_RW (0 << 28) /* CFL+ & Gen11+ */
-#define RING_FORCE_TO_NONPRIV_RD (1 << 28)
-#define RING_FORCE_TO_NONPRIV_WR (2 << 28)
+#define RING_FORCE_TO_NONPRIV_ACCESS_RW (0 << 28) /* CFL+ & Gen11+ */
+#define RING_FORCE_TO_NONPRIV_ACCESS_RD (1 << 28)
+#define RING_FORCE_TO_NONPRIV_ACCESS_WR (2 << 28)
+#define RING_FORCE_TO_NONPRIV_ACCESS_INVALID (3 << 28)
+#define RING_FORCE_TO_NONPRIV_ACCESS_MASK (3 << 28)
#define RING_FORCE_TO_NONPRIV_RANGE_1 (0 << 0) /* CFL+ & Gen11+ */
#define RING_FORCE_TO_NONPRIV_RANGE_4 (1 << 0)
#define RING_FORCE_TO_NONPRIV_RANGE_16 (2 << 0)
#define RING_FORCE_TO_NONPRIV_RANGE_64 (3 << 0)
+#define RING_FORCE_TO_NONPRIV_RANGE_MASK (3 << 0)
+#define RING_FORCE_TO_NONPRIV_MASK_VALID \
+ (RING_FORCE_TO_NONPRIV_RANGE_MASK \
+ | RING_FORCE_TO_NONPRIV_ACCESS_MASK)
#define RING_MAX_NONPRIV_SLOTS 12
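[Editor's note: the new ACCESS/RANGE masks make the packing of a whitelist slot explicit — an entry is a register address OR'd with an access mode and a range size. A hedged sketch of composing one, where addr is a placeholder:

	/* illustrative: grant read-only access to a run of 4 registers
	 * starting at some whitelisted address `addr` (low bits clear) */
	u32 entry = addr |
		    RING_FORCE_TO_NONPRIV_ACCESS_RD |
		    RING_FORCE_TO_NONPRIV_RANGE_4;
]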
#define GEN7_TLB_RD_ADDR _MMIO(0x4700)
@@ -2614,6 +2591,8 @@ enum i915_power_well_id {
#define GEN8_FAULT_TLB_DATA0 _MMIO(0x4b10)
#define GEN8_FAULT_TLB_DATA1 _MMIO(0x4b14)
+#define GEN12_FAULT_TLB_DATA0 _MMIO(0xceb8)
+#define GEN12_FAULT_TLB_DATA1 _MMIO(0xcebc)
#define FAULT_VA_HIGH_BITS (0xf << 0)
#define FAULT_GTT_SEL (1 << 4)
@@ -3229,25 +3208,7 @@ enum i915_power_well_id {
#define GMBUS_RATE_1MHZ (3 << 8) /* reserved on Pineview */
#define GMBUS_HOLD_EXT (1 << 7) /* 300ns hold time, rsvd on Pineview */
#define GMBUS_BYTE_CNT_OVERRIDE (1 << 6)
-#define GMBUS_PIN_DISABLED 0
-#define GMBUS_PIN_SSC 1
-#define GMBUS_PIN_VGADDC 2
-#define GMBUS_PIN_PANEL 3
-#define GMBUS_PIN_DPD_CHV 3 /* HDMID_CHV */
-#define GMBUS_PIN_DPC 4 /* HDMIC */
-#define GMBUS_PIN_DPB 5 /* SDVO, HDMIB */
-#define GMBUS_PIN_DPD 6 /* HDMID */
-#define GMBUS_PIN_RESERVED 7 /* 7 reserved */
-#define GMBUS_PIN_1_BXT 1 /* BXT+ (atom) and CNP+ (big core) */
-#define GMBUS_PIN_2_BXT 2
-#define GMBUS_PIN_3_BXT 3
-#define GMBUS_PIN_4_CNP 4
-#define GMBUS_PIN_9_TC1_ICP 9
-#define GMBUS_PIN_10_TC2_ICP 10
-#define GMBUS_PIN_11_TC3_ICP 11
-#define GMBUS_PIN_12_TC4_ICP 12
-
-#define GMBUS_NUM_PINS 13 /* including 0 */
+
#define GMBUS1 _MMIO(dev_priv->gpio_mmio_base + 0x5104) /* command/status */
#define GMBUS_SW_CLR_INT (1 << 31)
#define GMBUS_SW_RDY (1 << 30)
@@ -4209,6 +4170,7 @@ enum {
#define TRANSCODER_B_OFFSET 0x61000
#define TRANSCODER_C_OFFSET 0x62000
#define CHV_TRANSCODER_C_OFFSET 0x63000
+#define TRANSCODER_D_OFFSET 0x63000
#define TRANSCODER_EDP_OFFSET 0x6f000
#define TRANSCODER_DSI0_OFFSET 0x6b000
#define TRANSCODER_DSI1_OFFSET 0x6b800
@@ -5755,6 +5717,7 @@ enum {
#define PIPE_A_OFFSET 0x70000
#define PIPE_B_OFFSET 0x71000
#define PIPE_C_OFFSET 0x72000
+#define PIPE_D_OFFSET 0x73000
#define CHV_PIPE_C_OFFSET 0x74000
/*
* There's actually no pipe EDP. Some pipe registers have
@@ -6284,6 +6247,7 @@ enum {
#define _DSPATILEOFF 0x701A4 /* 965+ only */
#define _DSPAOFFSET 0x701A4 /* HSW */
#define _DSPASURFLIVE 0x701AC
+#define _DSPAGAMC 0x701E0
#define DSPCNTR(plane) _MMIO_PIPE2(plane, _DSPACNTR)
#define DSPADDR(plane) _MMIO_PIPE2(plane, _DSPAADDR)
@@ -6295,6 +6259,7 @@ enum {
#define DSPLINOFF(plane) DSPADDR(plane)
#define DSPOFFSET(plane) _MMIO_PIPE2(plane, _DSPAOFFSET)
#define DSPSURFLIVE(plane) _MMIO_PIPE2(plane, _DSPASURFLIVE)
+#define DSPGAMC(plane, i) _MMIO(_PIPE2(plane, _DSPAGAMC) + (5 - (i)) * 4) /* plane C only, 6 x u0.8 */
/* CHV pipe B blender and primary plane */
#define _CHV_BLEND_A 0x60a00
@@ -6397,6 +6362,7 @@ enum {
#define _DVSAKEYMAXVAL 0x721a0
#define _DVSATILEOFF 0x721a4
#define _DVSASURFLIVE 0x721ac
+#define _DVSAGAMC_G4X 0x721e0 /* g4x */
#define _DVSASCALE 0x72204
#define DVS_SCALE_ENABLE (1 << 31)
#define DVS_FILTER_MASK (3 << 29)
@@ -6405,7 +6371,8 @@ enum {
#define DVS_FILTER_SOFTENING (2 << 29)
#define DVS_VERTICAL_OFFSET_HALF (1 << 28) /* must be enabled below */
#define DVS_VERTICAL_OFFSET_ENABLE (1 << 27)
-#define _DVSAGAMC 0x72300
+#define _DVSAGAMC_ILK 0x72300 /* ilk/snb */
+#define _DVSAGAMCMAX_ILK 0x72340 /* ilk/snb */
#define _DVSBCNTR 0x73180
#define _DVSBLINOFF 0x73184
@@ -6418,8 +6385,10 @@ enum {
#define _DVSBKEYMAXVAL 0x731a0
#define _DVSBTILEOFF 0x731a4
#define _DVSBSURFLIVE 0x731ac
+#define _DVSBGAMC_G4X 0x731e0 /* g4x */
#define _DVSBSCALE 0x73204
-#define _DVSBGAMC 0x73300
+#define _DVSBGAMC_ILK 0x73300 /* ilk/snb */
+#define _DVSBGAMCMAX_ILK 0x73340 /* ilk/snb */
#define DVSCNTR(pipe) _MMIO_PIPE(pipe, _DVSACNTR, _DVSBCNTR)
#define DVSLINOFF(pipe) _MMIO_PIPE(pipe, _DVSALINOFF, _DVSBLINOFF)
@@ -6433,6 +6402,9 @@ enum {
#define DVSKEYVAL(pipe) _MMIO_PIPE(pipe, _DVSAKEYVAL, _DVSBKEYVAL)
#define DVSKEYMSK(pipe) _MMIO_PIPE(pipe, _DVSAKEYMSK, _DVSBKEYMSK)
#define DVSSURFLIVE(pipe) _MMIO_PIPE(pipe, _DVSASURFLIVE, _DVSBSURFLIVE)
+#define DVSGAMC_G4X(pipe, i) _MMIO(_PIPE(pipe, _DVSAGAMC_G4X, _DVSBGAMC_G4X) + (5 - (i)) * 4) /* 6 x u0.8 */
+#define DVSGAMC_ILK(pipe, i) _MMIO(_PIPE(pipe, _DVSAGAMC_ILK, _DVSBGAMC_ILK) + (i) * 4) /* 16 x u0.10 */
+#define DVSGAMCMAX_ILK(pipe, i) _MMIO(_PIPE(pipe, _DVSAGAMCMAX_ILK, _DVSBGAMCMAX_ILK) + (i) * 4) /* 3 x u1.10 */
#define _SPRA_CTL 0x70280
#define SPRITE_ENABLE (1 << 31)
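[Editor's note: the sprite gamma registers are now parameterized by entry index, so a LUT load becomes a loop over per-entry registers. A hedged sketch for the 16-entry u0.10 ramp — I915_WRITE() is the driver's mmio helper and the linear ramp is only example data:]

	/* Sketch: load a linear 16-entry u0.10 gamma ramp, packing the
	 * same value into the R, G and B fields of each SPRGAMC entry.
	 */
	for (i = 0; i < 16; i++) {
		u32 v = i * (1 << 10) / 16;	/* i/16 in u0.10 */

		I915_WRITE(SPRGAMC(pipe, i), v << 20 | v << 10 | v);
	}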
@@ -6457,7 +6429,7 @@ enum {
#define SPRITE_YUV_ORDER_VYUY (3 << 16)
#define SPRITE_ROTATE_180 (1 << 15)
#define SPRITE_TRICKLE_FEED_DISABLE (1 << 14)
-#define SPRITE_INT_GAMMA_ENABLE (1 << 13)
+#define SPRITE_INT_GAMMA_DISABLE (1 << 13)
#define SPRITE_TILED (1 << 10)
#define SPRITE_DEST_KEY (1 << 2)
#define _SPRA_LINOFF 0x70284
@@ -6480,6 +6452,8 @@ enum {
#define SPRITE_VERTICAL_OFFSET_HALF (1 << 28) /* must be enabled below */
#define SPRITE_VERTICAL_OFFSET_ENABLE (1 << 27)
#define _SPRA_GAMC 0x70400
+#define _SPRA_GAMC16 0x70440
+#define _SPRA_GAMC17 0x7044c
#define _SPRB_CTL 0x71280
#define _SPRB_LINOFF 0x71284
@@ -6495,6 +6469,8 @@ enum {
#define _SPRB_SURFLIVE 0x712ac
#define _SPRB_SCALE 0x71304
#define _SPRB_GAMC 0x71400
+#define _SPRB_GAMC16 0x71440
+#define _SPRB_GAMC17 0x7144c
#define SPRCTL(pipe) _MMIO_PIPE(pipe, _SPRA_CTL, _SPRB_CTL)
#define SPRLINOFF(pipe) _MMIO_PIPE(pipe, _SPRA_LINOFF, _SPRB_LINOFF)
@@ -6508,7 +6484,9 @@ enum {
#define SPRTILEOFF(pipe) _MMIO_PIPE(pipe, _SPRA_TILEOFF, _SPRB_TILEOFF)
#define SPROFFSET(pipe) _MMIO_PIPE(pipe, _SPRA_OFFSET, _SPRB_OFFSET)
#define SPRSCALE(pipe) _MMIO_PIPE(pipe, _SPRA_SCALE, _SPRB_SCALE)
-#define SPRGAMC(pipe) _MMIO_PIPE(pipe, _SPRA_GAMC, _SPRB_GAMC)
+#define SPRGAMC(pipe, i) _MMIO(_PIPE(pipe, _SPRA_GAMC, _SPRB_GAMC) + (i) * 4) /* 16 x u0.10 */
+#define SPRGAMC16(pipe, i) _MMIO(_PIPE(pipe, _SPRA_GAMC16, _SPRB_GAMC16) + (i) * 4) /* 3 x u1.10 */
+#define SPRGAMC17(pipe, i) _MMIO(_PIPE(pipe, _SPRA_GAMC17, _SPRB_GAMC17) + (i) * 4) /* 3 x u2.10 */
#define SPRSURFLIVE(pipe) _MMIO_PIPE(pipe, _SPRA_SURFLIVE, _SPRB_SURFLIVE)
#define _SPACNTR (VLV_DISPLAY_BASE + 0x72180)
@@ -6551,7 +6529,7 @@ enum {
#define _SPACLRC1 (VLV_DISPLAY_BASE + 0x721d4)
#define SP_SH_SIN(x) (((x) & 0x7ff) << 16) /* s4.7 */
#define SP_SH_COS(x) (x) /* u3.7 */
-#define _SPAGAMC (VLV_DISPLAY_BASE + 0x721f4)
+#define _SPAGAMC (VLV_DISPLAY_BASE + 0x721e0)
#define _SPBCNTR (VLV_DISPLAY_BASE + 0x72280)
#define _SPBLINOFF (VLV_DISPLAY_BASE + 0x72284)
@@ -6566,10 +6544,12 @@ enum {
#define _SPBCONSTALPHA (VLV_DISPLAY_BASE + 0x722a8)
#define _SPBCLRC0 (VLV_DISPLAY_BASE + 0x722d0)
#define _SPBCLRC1 (VLV_DISPLAY_BASE + 0x722d4)
-#define _SPBGAMC (VLV_DISPLAY_BASE + 0x722f4)
+#define _SPBGAMC (VLV_DISPLAY_BASE + 0x722e0)
+#define _VLV_SPR(pipe, plane_id, reg_a, reg_b) \
+ _PIPE((pipe) * 2 + (plane_id) - PLANE_SPRITE0, (reg_a), (reg_b))
#define _MMIO_VLV_SPR(pipe, plane_id, reg_a, reg_b) \
- _MMIO_PIPE((pipe) * 2 + (plane_id) - PLANE_SPRITE0, (reg_a), (reg_b))
+ _MMIO(_VLV_SPR((pipe), (plane_id), (reg_a), (reg_b)))
#define SPCNTR(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPACNTR, _SPBCNTR)
#define SPLINOFF(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPALINOFF, _SPBLINOFF)
@@ -6584,7 +6564,7 @@ enum {
#define SPCONSTALPHA(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPACONSTALPHA, _SPBCONSTALPHA)
#define SPCLRC0(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPACLRC0, _SPBCLRC0)
#define SPCLRC1(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPACLRC1, _SPBCLRC1)
-#define SPGAMC(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPAGAMC, _SPBGAMC)
+#define SPGAMC(pipe, plane_id, i) _MMIO(_VLV_SPR((pipe), (plane_id), _SPAGAMC, _SPBGAMC) + (5 - (i)) * 4) /* 6 x u0.10 */
/*
* CHV pipe B sprite CSC
@@ -7228,6 +7208,8 @@ enum {
#define SKL_CSR_DC3_DC5_COUNT _MMIO(0x80030)
#define SKL_CSR_DC5_DC6_COUNT _MMIO(0x8002C)
#define BXT_CSR_DC3_DC5_COUNT _MMIO(0x80038)
+#define TGL_DMC_DEBUG_DC5_COUNT _MMIO(0x101084)
+#define TGL_DMC_DEBUG_DC6_COUNT _MMIO(0x101088)
/* interrupts */
#define DE_MASTER_IRQ_CONTROL (1 << 31)
@@ -7317,16 +7299,6 @@ enum {
#define GEN8_GT_IIR(which) _MMIO(0x44308 + (0x10 * (which)))
#define GEN8_GT_IER(which) _MMIO(0x4430c + (0x10 * (which)))
-#define GEN9_GUC_TO_HOST_INT_EVENT (1 << 31)
-#define GEN9_GUC_EXEC_ERROR_EVENT (1 << 30)
-#define GEN9_GUC_DISPLAY_EVENT (1 << 29)
-#define GEN9_GUC_SEMA_SIGNAL_EVENT (1 << 28)
-#define GEN9_GUC_IOMMU_MSG_EVENT (1 << 27)
-#define GEN9_GUC_DB_RING_EVENT (1 << 26)
-#define GEN9_GUC_DMA_DONE_EVENT (1 << 25)
-#define GEN9_GUC_FATAL_ERROR_EVENT (1 << 24)
-#define GEN9_GUC_NOTIFICATION_EVENT (1 << 23)
-
#define GEN8_RCS_IRQ_SHIFT 0
#define GEN8_BCS_IRQ_SHIFT 16
#define GEN8_VCS0_IRQ_SHIFT 0 /* NB: VCS1 in bspec! */
@@ -7388,6 +7360,9 @@ enum {
#define GEN8_PORT_DP_A_HOTPLUG (1 << 3)
#define BXT_DE_PORT_GMBUS (1 << 1)
#define GEN8_AUX_CHANNEL_A (1 << 0)
+#define TGL_DE_PORT_AUX_DDIC (1 << 2)
+#define TGL_DE_PORT_AUX_DDIB (1 << 1)
+#define TGL_DE_PORT_AUX_DDIA (1 << 0)
#define GEN8_DE_MISC_ISR _MMIO(0x44460)
#define GEN8_DE_MISC_IMR _MMIO(0x44464)
@@ -7431,21 +7406,29 @@ enum {
#define GEN11_DE_HPD_IMR _MMIO(0x44474)
#define GEN11_DE_HPD_IIR _MMIO(0x44478)
#define GEN11_DE_HPD_IER _MMIO(0x4447c)
+#define GEN12_TC6_HOTPLUG (1 << 21)
+#define GEN12_TC5_HOTPLUG (1 << 20)
#define GEN11_TC4_HOTPLUG (1 << 19)
#define GEN11_TC3_HOTPLUG (1 << 18)
#define GEN11_TC2_HOTPLUG (1 << 17)
#define GEN11_TC1_HOTPLUG (1 << 16)
#define GEN11_TC_HOTPLUG(tc_port) (1 << ((tc_port) + 16))
-#define GEN11_DE_TC_HOTPLUG_MASK (GEN11_TC4_HOTPLUG | \
+#define GEN11_DE_TC_HOTPLUG_MASK (GEN12_TC6_HOTPLUG | \
+ GEN12_TC5_HOTPLUG | \
+ GEN11_TC4_HOTPLUG | \
GEN11_TC3_HOTPLUG | \
GEN11_TC2_HOTPLUG | \
GEN11_TC1_HOTPLUG)
+#define GEN12_TBT6_HOTPLUG (1 << 5)
+#define GEN12_TBT5_HOTPLUG (1 << 4)
#define GEN11_TBT4_HOTPLUG (1 << 3)
#define GEN11_TBT3_HOTPLUG (1 << 2)
#define GEN11_TBT2_HOTPLUG (1 << 1)
#define GEN11_TBT1_HOTPLUG (1 << 0)
#define GEN11_TBT_HOTPLUG(tc_port) (1 << (tc_port))
-#define GEN11_DE_TBT_HOTPLUG_MASK (GEN11_TBT4_HOTPLUG | \
+#define GEN11_DE_TBT_HOTPLUG_MASK (GEN12_TBT6_HOTPLUG | \
+ GEN12_TBT5_HOTPLUG | \
+ GEN11_TBT4_HOTPLUG | \
GEN11_TBT3_HOTPLUG | \
GEN11_TBT2_HOTPLUG | \
GEN11_TBT1_HOTPLUG)
@@ -7479,6 +7462,9 @@ enum {
#define GEN11_INTR_ENGINE_CLASS(x) (((x) & GENMASK(18, 16)) >> 16)
#define GEN11_INTR_ENGINE_INSTANCE(x) (((x) & GENMASK(25, 20)) >> 20)
#define GEN11_INTR_ENGINE_INTR(x) ((x) & 0xffff)
+/* irq instances for OTHER_CLASS */
+#define OTHER_GUC_INSTANCE 0
+#define OTHER_GTPM_INSTANCE 1
#define GEN11_INTR_IDENTITY_REG(x) _MMIO(0x190060 + ((x) * 4))
@@ -7606,6 +7592,7 @@ enum {
#define SKL_DFSM_PIPE_A_DISABLE (1 << 30)
#define SKL_DFSM_PIPE_B_DISABLE (1 << 21)
#define SKL_DFSM_PIPE_C_DISABLE (1 << 28)
+#define TGL_DFSM_PIPE_D_DISABLE (1 << 22)
#define SKL_DSSM _MMIO(0x51004)
#define CNL_DSSM_CDCLK_PLL_REFCLK_24MHz (1 << 31)
@@ -7690,6 +7677,9 @@ enum {
#define GEN7_L3SQCREG4 _MMIO(0xb034)
#define L3SQ_URB_READ_CAM_MATCH_DISABLE (1 << 27)
+#define GEN11_SCRATCH2 _MMIO(0xb140)
+#define GEN11_COHERENT_PARTIAL_WRITE_MERGE_ENABLE (1 << 19)
+
#define GEN8_L3SQCREG4 _MMIO(0xb118)
#define GEN11_LQSC_CLEAN_EVICT_DISABLE (1 << 6)
#define GEN8_LQSC_RO_PERF_DIS (1 << 27)
@@ -7827,12 +7817,15 @@ enum {
SDE_FDI_RXB_CPT | \
SDE_FDI_RXA_CPT)
-/* south display engine interrupt: ICP */
+/* south display engine interrupt: ICP/TGP */
+#define SDE_TC6_HOTPLUG_TGP (1 << 29)
+#define SDE_TC5_HOTPLUG_TGP (1 << 28)
#define SDE_TC4_HOTPLUG_ICP (1 << 27)
#define SDE_TC3_HOTPLUG_ICP (1 << 26)
#define SDE_TC2_HOTPLUG_ICP (1 << 25)
#define SDE_TC1_HOTPLUG_ICP (1 << 24)
#define SDE_GMBUS_ICP (1 << 23)
+#define SDE_DDIC_HOTPLUG_TGP (1 << 18)
#define SDE_DDIB_HOTPLUG_ICP (1 << 17)
#define SDE_DDIA_HOTPLUG_ICP (1 << 16)
#define SDE_TC_HOTPLUG_ICP(tc_port) (1 << ((tc_port) + 24))
@@ -7843,6 +7836,11 @@ enum {
SDE_TC3_HOTPLUG_ICP | \
SDE_TC2_HOTPLUG_ICP | \
SDE_TC1_HOTPLUG_ICP)
+#define SDE_DDI_MASK_TGP (SDE_DDIC_HOTPLUG_TGP | \
+ SDE_DDI_MASK_ICP)
+#define SDE_TC_MASK_TGP (SDE_TC6_HOTPLUG_TGP | \
+ SDE_TC5_HOTPLUG_TGP | \
+ SDE_TC_MASK_ICP)
#define SDEISR _MMIO(0xc4000)
#define SDEIMR _MMIO(0xc4004)
@@ -7910,6 +7908,12 @@ enum {
*/
#define SHOTPLUG_CTL_DDI _MMIO(0xc4030)
+#define TGP_DDIC_HPD_ENABLE (1 << 11)
+#define TGP_DDIC_HPD_STATUS_MASK (3 << 8)
+#define TGP_DDIC_HPD_NO_DETECT (0 << 8)
+#define TGP_DDIC_HPD_SHORT_DETECT (1 << 8)
+#define TGP_DDIC_HPD_LONG_DETECT (2 << 8)
+#define TGP_DDIC_HPD_SHORT_LONG_DETECT (3 << 8)
#define ICP_DDIB_HPD_ENABLE (1 << 7)
#define ICP_DDIB_HPD_STATUS_MASK (3 << 4)
#define ICP_DDIB_HPD_NO_DETECT (0 << 4)
@@ -8033,6 +8037,18 @@ enum {
#define ICP_TC_HPD_LONG_DETECT(tc_port) (2 << (tc_port) * 4)
#define ICP_TC_HPD_SHORT_DETECT(tc_port) (1 << (tc_port) * 4)
+#define ICP_DDI_HPD_ENABLE_MASK (ICP_DDIB_HPD_ENABLE | \
+ ICP_DDIA_HPD_ENABLE)
+#define ICP_TC_HPD_ENABLE_MASK (ICP_TC_HPD_ENABLE(PORT_TC4) | \
+ ICP_TC_HPD_ENABLE(PORT_TC3) | \
+ ICP_TC_HPD_ENABLE(PORT_TC2) | \
+ ICP_TC_HPD_ENABLE(PORT_TC1))
+#define TGP_DDI_HPD_ENABLE_MASK (TGP_DDIC_HPD_ENABLE | \
+ ICP_DDI_HPD_ENABLE_MASK)
+#define TGP_TC_HPD_ENABLE_MASK (ICP_TC_HPD_ENABLE(PORT_TC6) | \
+ ICP_TC_HPD_ENABLE(PORT_TC5) | \
+ ICP_TC_HPD_ENABLE_MASK)
+
#define _PCH_DPLL_A 0xc6014
#define _PCH_DPLL_B 0xc6018
#define PCH_DPLL(pll) _MMIO((pll) == 0 ? _PCH_DPLL_A : _PCH_DPLL_B)
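[Editor's note: the new TGP DDIC hot-plug status field reuses the two-bit short/long encoding from ICP's DDIA/DDIB, so long-pulse detection reduces to testing the high bit of the field, which is also set in the SHORT_LONG case. A sketch; the function name is illustrative:]

	/* Sketch: TGP_DDIC_HPD_LONG_DETECT is bit 9, which the
	 * SHORT_LONG encoding also sets, so one bit test suffices.
	 */
	static bool tgp_ddic_long_pulse(u32 val)
	{
		return val & TGP_DDIC_HPD_LONG_DETECT;
	}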
@@ -9119,7 +9135,8 @@ enum {
#define GLK_PW_CTL_IDX_DDI_A 1
#define SKL_PW_CTL_IDX_MISC_IO 0
-/* ICL - power wells */
+/* ICL/TGL - power wells */
+#define TGL_PW_CTL_IDX_PW_5 4
#define ICL_PW_CTL_IDX_PW_4 3
#define ICL_PW_CTL_IDX_PW_3 2
#define ICL_PW_CTL_IDX_PW_2 1
@@ -9128,13 +9145,25 @@ enum {
#define ICL_PWR_WELL_CTL_AUX1 _MMIO(0x45440)
#define ICL_PWR_WELL_CTL_AUX2 _MMIO(0x45444)
#define ICL_PWR_WELL_CTL_AUX4 _MMIO(0x4544C)
+#define TGL_PW_CTL_IDX_AUX_TBT6 14
+#define TGL_PW_CTL_IDX_AUX_TBT5 13
+#define TGL_PW_CTL_IDX_AUX_TBT4 12
#define ICL_PW_CTL_IDX_AUX_TBT4 11
+#define TGL_PW_CTL_IDX_AUX_TBT3 11
#define ICL_PW_CTL_IDX_AUX_TBT3 10
+#define TGL_PW_CTL_IDX_AUX_TBT2 10
#define ICL_PW_CTL_IDX_AUX_TBT2 9
+#define TGL_PW_CTL_IDX_AUX_TBT1 9
#define ICL_PW_CTL_IDX_AUX_TBT1 8
+#define TGL_PW_CTL_IDX_AUX_TC6 8
+#define TGL_PW_CTL_IDX_AUX_TC5 7
+#define TGL_PW_CTL_IDX_AUX_TC4 6
#define ICL_PW_CTL_IDX_AUX_F 5
+#define TGL_PW_CTL_IDX_AUX_TC3 5
#define ICL_PW_CTL_IDX_AUX_E 4
+#define TGL_PW_CTL_IDX_AUX_TC2 4
#define ICL_PW_CTL_IDX_AUX_D 3
+#define TGL_PW_CTL_IDX_AUX_TC1 3
#define ICL_PW_CTL_IDX_AUX_C 2
#define ICL_PW_CTL_IDX_AUX_B 1
#define ICL_PW_CTL_IDX_AUX_A 0
@@ -9142,9 +9171,15 @@ enum {
#define ICL_PWR_WELL_CTL_DDI1 _MMIO(0x45450)
#define ICL_PWR_WELL_CTL_DDI2 _MMIO(0x45454)
#define ICL_PWR_WELL_CTL_DDI4 _MMIO(0x4545C)
+#define TGL_PW_CTL_IDX_DDI_TC6 8
+#define TGL_PW_CTL_IDX_DDI_TC5 7
+#define TGL_PW_CTL_IDX_DDI_TC4 6
#define ICL_PW_CTL_IDX_DDI_F 5
+#define TGL_PW_CTL_IDX_DDI_TC3 5
#define ICL_PW_CTL_IDX_DDI_E 4
+#define TGL_PW_CTL_IDX_DDI_TC2 4
#define ICL_PW_CTL_IDX_DDI_D 3
+#define TGL_PW_CTL_IDX_DDI_TC1 3
#define ICL_PW_CTL_IDX_DDI_C 2
#define ICL_PW_CTL_IDX_DDI_B 1
#define ICL_PW_CTL_IDX_DDI_A 0
@@ -9197,9 +9232,11 @@ enum skl_power_gate {
#define _ICL_AUX_REG_IDX(pw_idx) ((pw_idx) - ICL_PW_CTL_IDX_AUX_A)
#define _ICL_AUX_ANAOVRD1_A 0x162398
#define _ICL_AUX_ANAOVRD1_B 0x6C398
+#define _TGL_AUX_ANAOVRD1_C 0x160398
#define ICL_AUX_ANAOVRD1(pw_idx) _MMIO(_PICK(_ICL_AUX_REG_IDX(pw_idx), \
_ICL_AUX_ANAOVRD1_A, \
- _ICL_AUX_ANAOVRD1_B))
+ _ICL_AUX_ANAOVRD1_B, \
+ _TGL_AUX_ANAOVRD1_C))
#define ICL_AUX_ANAOVRD1_LDO_BYPASS (1 << 7)
#define ICL_AUX_ANAOVRD1_ENABLE (1 << 0)
@@ -9321,6 +9358,7 @@ enum skl_power_gate {
#define _TRANS_DDI_FUNC_CTL_A 0x60400
#define _TRANS_DDI_FUNC_CTL_B 0x61400
#define _TRANS_DDI_FUNC_CTL_C 0x62400
+#define _TRANS_DDI_FUNC_CTL_D 0x63400
#define _TRANS_DDI_FUNC_CTL_EDP 0x6F400
#define _TRANS_DDI_FUNC_CTL_DSI0 0x6b400
#define _TRANS_DDI_FUNC_CTL_DSI1 0x6bc00
@@ -9328,10 +9366,14 @@ enum skl_power_gate {
#define TRANS_DDI_FUNC_ENABLE (1 << 31)
/* Those bits are ignored by pipe EDP since it can only connect to DDI A */
-#define TRANS_DDI_PORT_MASK (7 << 28)
#define TRANS_DDI_PORT_SHIFT 28
-#define TRANS_DDI_SELECT_PORT(x) ((x) << 28)
-#define TRANS_DDI_PORT_NONE (0 << 28)
+#define TGL_TRANS_DDI_PORT_SHIFT 27
+#define TRANS_DDI_PORT_MASK (7 << TRANS_DDI_PORT_SHIFT)
+#define TGL_TRANS_DDI_PORT_MASK (0xf << TGL_TRANS_DDI_PORT_SHIFT)
+#define TRANS_DDI_SELECT_PORT(x) ((x) << TRANS_DDI_PORT_SHIFT)
+#define TGL_TRANS_DDI_SELECT_PORT(x) (((x) + 1) << TGL_TRANS_DDI_PORT_SHIFT)
+#define TRANS_DDI_FUNC_CTL_VAL_TO_PORT(val) (((val) & TRANS_DDI_PORT_MASK) >> TRANS_DDI_PORT_SHIFT)
+#define TGL_TRANS_DDI_FUNC_CTL_VAL_TO_PORT(val) ((((val) & TGL_TRANS_DDI_PORT_MASK) >> TGL_TRANS_DDI_PORT_SHIFT) - 1)
#define TRANS_DDI_MODE_SELECT_MASK (7 << 24)
#define TRANS_DDI_MODE_SELECT_HDMI (0 << 24)
#define TRANS_DDI_MODE_SELECT_DVI (1 << 24)
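[Editor's note: on Tiger Lake the transcoder DDI port field moves down to bit 27, widens to four bits, and is biased by one so that zero still means "no port attached". The macros above hide the bias; a quick round trip makes the convention concrete:]

	/* Sketch: the +1/-1 bias cancels across encode and decode. */
	u32 val = TGL_TRANS_DDI_SELECT_PORT(PORT_A);	/* (PORT_A + 1) << 27 */
	enum port port = TGL_TRANS_DDI_FUNC_CTL_VAL_TO_PORT(val); /* PORT_A */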
@@ -9541,6 +9583,9 @@ enum skl_power_gate {
/* For each transcoder, we need to select the corresponding port clock */
#define TRANS_CLK_SEL_DISABLED (0x0 << 29)
#define TRANS_CLK_SEL_PORT(x) (((x) + 1) << 29)
+#define TGL_TRANS_CLK_SEL_DISABLED (0x0 << 28)
+#define TGL_TRANS_CLK_SEL_PORT(x) (((x) + 1) << 28)
+
#define CDCLK_FREQ _MMIO(0x46200)
@@ -9672,17 +9717,22 @@ enum skl_power_gate {
* CNL Clocks
*/
#define DPCLKA_CFGCR0 _MMIO(0x6C200)
-#define DPCLKA_CFGCR0_ICL _MMIO(0x164280)
#define DPCLKA_CFGCR0_DDI_CLK_OFF(port) (1 << ((port) == PORT_F ? 23 : \
(port) + 10))
-#define ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(port) (1 << ((port) + 10))
-#define ICL_DPCLKA_CFGCR0_TC_CLK_OFF(tc_port) (1 << ((tc_port) == PORT_TC4 ? \
- 21 : (tc_port) + 12))
#define DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(port) ((port) == PORT_F ? 21 : \
(port) * 2)
#define DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port) (3 << DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(port))
#define DPCLKA_CFGCR0_DDI_CLK_SEL(pll, port) ((pll) << DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(port))
+#define ICL_DPCLKA_CFGCR0 _MMIO(0x164280)
+#define ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy) (1 << _PICK(phy, 10, 11, 24))
+#define ICL_DPCLKA_CFGCR0_TC_CLK_OFF(tc_port) (1 << ((tc_port) < PORT_TC4 ? \
+ (tc_port) + 12 : \
+ (tc_port) - PORT_TC4 + 21))
+#define ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy) ((phy) * 2)
+#define ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy) (3 << ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy))
+#define ICL_DPCLKA_CFGCR0_DDI_CLK_SEL(pll, phy) ((pll) << ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy))
+
/* CNL PLL */
#define DPLL0_ENABLE 0x46010
#define DPLL1_ENABLE 0x46014
@@ -9887,6 +9937,7 @@ enum skl_power_gate {
#define DPLL_CFGCR1_PDIV_7 (8 << 2)
#define DPLL_CFGCR1_CENTRAL_FREQ (3 << 0)
#define DPLL_CFGCR1_CENTRAL_FREQ_8400 (3 << 0)
+#define TGL_DPLL_CFGCR1_CFSELOVRD_NORMAL_XTAL (0 << 0)
#define CNL_DPLL_CFGCR1(pll) _MMIO_PLL(pll, _CNL_DPLL0_CFGCR1, _CNL_DPLL1_CFGCR1)
#define _ICL_DPLL0_CFGCR0 0x164000
@@ -9899,6 +9950,22 @@ enum skl_power_gate {
#define ICL_DPLL_CFGCR1(pll) _MMIO_PLL(pll, _ICL_DPLL0_CFGCR1, \
_ICL_DPLL1_CFGCR1)
+#define _TGL_DPLL0_CFGCR0 0x164284
+#define _TGL_DPLL1_CFGCR0 0x16428C
+/* TODO: add DPLL4 */
+#define _TGL_TBTPLL_CFGCR0 0x16429C
+#define TGL_DPLL_CFGCR0(pll) _MMIO_PLL3(pll, _TGL_DPLL0_CFGCR0, \
+ _TGL_DPLL1_CFGCR0, \
+ _TGL_TBTPLL_CFGCR0)
+
+#define _TGL_DPLL0_CFGCR1 0x164288
+#define _TGL_DPLL1_CFGCR1 0x164290
+/* TODO: add DPLL4 */
+#define _TGL_TBTPLL_CFGCR1 0x1642A0
+#define TGL_DPLL_CFGCR1(pll) _MMIO_PLL3(pll, _TGL_DPLL0_CFGCR1, \
+ _TGL_DPLL1_CFGCR1, \
+ _TGL_TBTPLL_CFGCR1)
+
/* BXT display engine PLL */
#define BXT_DE_PLL_CTL _MMIO(0x6d000)
#define BXT_DE_PLL_RATIO(x) (x) /* {60,65,100} * 19.2MHz */
@@ -10896,6 +10963,7 @@ enum skl_power_gate {
#define CALIBRATION_DISABLED (0x0 << 4)
#define CALIBRATION_ENABLED_INITIAL_ONLY (0x2 << 4)
#define CALIBRATION_ENABLED_INITIAL_PERIODIC (0x3 << 4)
+#define BLANKING_PACKET_ENABLE (1 << 2)
#define S3D_ORIENTATION_LANDSCAPE (1 << 1)
#define EOTP_DISABLED (1 << 0)
@@ -11130,6 +11198,8 @@ enum skl_power_gate {
#define PMFLUSH_GAPL3UNBLOCK (1 << 21)
#define PMFLUSHDONE_LNEBLK (1 << 22)
+#define GEN12_GLOBAL_MOCS(i) _MMIO(0x4000 + (i) * 4) /* Global MOCS regs */
+
/* gamt regs */
#define GEN8_L3_LRA_1_GPGPU _MMIO(0x4dd4)
#define GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_BDW 0x67F1427F /* max/min for LRA1/2 */
@@ -11145,6 +11215,7 @@ enum skl_power_gate {
#define _ICL_PHY_MISC_B 0x64C04
#define ICL_PHY_MISC(port) _MMIO_PORT(port, _ICL_PHY_MISC_A, \
_ICL_PHY_MISC_B)
+#define ICL_PHY_MISC_MUX_DDID (1 << 28)
#define ICL_PHY_MISC_DE_IO_COMP_PWR_DOWN (1 << 23)
/* Icelake Display Stream Compression Registers */
@@ -11454,17 +11525,18 @@ enum skl_power_gate {
_ICL_DSC1_RC_BUF_THRESH_1_UDW_PB, \
_ICL_DSC1_RC_BUF_THRESH_1_UDW_PC)
-#define PORT_TX_DFLEXDPSP _MMIO(FIA1_BASE + 0x008A0)
+#define PORT_TX_DFLEXDPSP(fia) _MMIO_FIA((fia), 0x008A0)
+#define MODULAR_FIA_MASK (1 << 4)
#define TC_LIVE_STATE_TBT(tc_port) (1 << ((tc_port) * 8 + 6))
#define TC_LIVE_STATE_TC(tc_port) (1 << ((tc_port) * 8 + 5))
#define DP_LANE_ASSIGNMENT_SHIFT(tc_port) ((tc_port) * 8)
#define DP_LANE_ASSIGNMENT_MASK(tc_port) (0xf << ((tc_port) * 8))
#define DP_LANE_ASSIGNMENT(tc_port, x) ((x) << ((tc_port) * 8))
-#define PORT_TX_DFLEXDPPMS _MMIO(FIA1_BASE + 0x00890)
+#define PORT_TX_DFLEXDPPMS(fia) _MMIO_FIA((fia), 0x00890)
#define DP_PHY_MODE_STATUS_COMPLETED(tc_port) (1 << (tc_port))
-#define PORT_TX_DFLEXDPCSSS _MMIO(FIA1_BASE + 0x00894)
+#define PORT_TX_DFLEXDPCSSS(fia) _MMIO_FIA((fia), 0x00894)
#define DP_PHY_MODE_STATUS_NOT_SAFE(tc_port) (1 << (tc_port))
#endif /* _I915_REG_H_ */
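[Editor's note: the Type-C DFLEX registers are now indexed by FIA instance via _MMIO_FIA() instead of hardcoding FIA1_BASE, anticipating modular-FIA parts where several FIAs each own a subset of ports. A hedged sketch of a live-status query under that scheme — tc_port_to_fia() is a hypothetical mapping helper; a non-modular part would always map to FIA1:]

	/* Sketch: read TC live state through the per-FIA register. */
	static bool tc_port_live(struct drm_i915_private *i915,
				 enum tc_port tc_port)
	{
		u32 val = I915_READ(PORT_TX_DFLEXDPSP(tc_port_to_fia(tc_port)));

		return val & TC_LIVE_STATE_TC(tc_port);
	}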
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index a195a92d0105..a53777dd371c 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -35,6 +35,7 @@
#include "i915_active.h"
#include "i915_drv.h"
#include "i915_globals.h"
+#include "i915_trace.h"
#include "intel_pm.h"
struct execute_cb {
@@ -119,12 +120,56 @@ const struct dma_fence_ops i915_fence_ops = {
.release = i915_fence_release,
};
+static void irq_execute_cb(struct irq_work *wrk)
+{
+ struct execute_cb *cb = container_of(wrk, typeof(*cb), work);
+
+ i915_sw_fence_complete(cb->fence);
+ kmem_cache_free(global.slab_execute_cbs, cb);
+}
+
+static void irq_execute_cb_hook(struct irq_work *wrk)
+{
+ struct execute_cb *cb = container_of(wrk, typeof(*cb), work);
+
+ cb->hook(container_of(cb->fence, struct i915_request, submit),
+ &cb->signal->fence);
+ i915_request_put(cb->signal);
+
+ irq_execute_cb(wrk);
+}
+
+static void __notify_execute_cb(struct i915_request *rq)
+{
+ struct execute_cb *cb;
+
+ lockdep_assert_held(&rq->lock);
+
+ if (list_empty(&rq->execute_cb))
+ return;
+
+ list_for_each_entry(cb, &rq->execute_cb, link)
+ irq_work_queue(&cb->work);
+
+ /*
+ * XXX Rollback on __i915_request_unsubmit()
+ *
+ * In the future, perhaps when we have an active time-slicing scheduler,
+ * it will be interesting to unsubmit parallel execution and remove
+ * busywaits from the GPU until their master is restarted. This is
+ * quite hairy: we have to carefully roll back the fence and do a
+ * preempt-to-idle cycle on the target engine, all the while the
+ * master execute_cb may refire.
+ */
+ INIT_LIST_HEAD(&rq->execute_cb);
+}
+
static inline void
-i915_request_remove_from_client(struct i915_request *request)
+remove_from_client(struct i915_request *request)
{
struct drm_i915_file_private *file_priv;
- file_priv = request->file_priv;
+ file_priv = READ_ONCE(request->file_priv);
if (!file_priv)
return;
@@ -136,40 +181,6 @@ i915_request_remove_from_client(struct i915_request *request)
spin_unlock(&file_priv->mm.lock);
}
-static void advance_ring(struct i915_request *request)
-{
- struct intel_ring *ring = request->ring;
- unsigned int tail;
-
- /*
- * We know the GPU must have read the request to have
- * sent us the seqno + interrupt, so use the position
- * of tail of the request to update the last known position
- * of the GPU head.
- *
- * Note this requires that we are always called in request
- * completion order.
- */
- GEM_BUG_ON(!list_is_first(&request->ring_link, &ring->request_list));
- if (list_is_last(&request->ring_link, &ring->request_list)) {
- /*
- * We may race here with execlists resubmitting this request
- * as we retire it. The resubmission will move the ring->tail
- * forwards (to request->wa_tail). We either read the
- * current value that was written to hw, or the value that
- * is just about to be. Either works, if we miss the last two
- * noops - they are safe to be replayed on a reset.
- */
- tail = READ_ONCE(request->tail);
- list_del(&ring->active_link);
- } else {
- tail = request->postfix;
- }
- list_del_init(&request->ring_link);
-
- ring->head = tail;
-}
-
static void free_capture_list(struct i915_request *request)
{
struct i915_capture_list *capture;
@@ -187,7 +198,7 @@ static bool i915_request_retire(struct i915_request *rq)
{
struct i915_active_request *active, *next;
- lockdep_assert_held(&rq->i915->drm.struct_mutex);
+ lockdep_assert_held(&rq->timeline->mutex);
if (!i915_request_completed(rq))
return false;
@@ -199,7 +210,17 @@ static bool i915_request_retire(struct i915_request *rq)
GEM_BUG_ON(!i915_sw_fence_signaled(&rq->submit));
trace_i915_request_retire(rq);
- advance_ring(rq);
+ /*
+ * We know the GPU must have read the request to have
+ * sent us the seqno + interrupt, so use the position
+ * of the tail of the request to update the last known position
+ * of the GPU head.
+ *
+ * Note this requires that we are always called in request
+ * completion order.
+ */
+ GEM_BUG_ON(!list_is_first(&rq->link, &rq->timeline->requests));
+ rq->ring->head = rq->postfix;
/*
* Walk through the active list, calling retire on each. This allows
@@ -232,6 +253,12 @@ static bool i915_request_retire(struct i915_request *rq)
local_irq_disable();
+ /*
+ * We only loosely track inflight requests across preemption,
+ * and so we may find ourselves attempting to retire a _completed_
+ * request that we have removed from the HW and put back on a run
+ * queue.
+ */
spin_lock(&rq->engine->active.lock);
list_del(&rq->sched.link);
spin_unlock(&rq->engine->active.lock);
@@ -242,20 +269,25 @@ static bool i915_request_retire(struct i915_request *rq)
dma_fence_signal_locked(&rq->fence);
if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &rq->fence.flags))
i915_request_cancel_breadcrumb(rq);
- if (rq->waitboost) {
+ if (i915_request_has_waitboost(rq)) {
GEM_BUG_ON(!atomic_read(&rq->i915->gt_pm.rps.num_waiters));
atomic_dec(&rq->i915->gt_pm.rps.num_waiters);
}
+ if (!test_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags)) {
+ set_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags);
+ __notify_execute_cb(rq);
+ }
+ GEM_BUG_ON(!list_empty(&rq->execute_cb));
spin_unlock(&rq->lock);
local_irq_enable();
+ remove_from_client(rq);
+ list_del(&rq->link);
+
intel_context_exit(rq->hw_context);
intel_context_unpin(rq->hw_context);
- i915_request_remove_from_client(rq);
- list_del(&rq->link);
-
free_capture_list(rq);
i915_sched_node_fini(&rq->sched);
i915_request_put(rq);
@@ -265,7 +297,7 @@ static bool i915_request_retire(struct i915_request *rq)
void i915_request_retire_upto(struct i915_request *rq)
{
- struct intel_ring *ring = rq->ring;
+ struct intel_timeline * const tl = rq->timeline;
struct i915_request *tmp;
GEM_TRACE("%s fence %llx:%lld, current %d\n",
@@ -273,62 +305,14 @@ void i915_request_retire_upto(struct i915_request *rq)
rq->fence.context, rq->fence.seqno,
hwsp_seqno(rq));
- lockdep_assert_held(&rq->i915->drm.struct_mutex);
+ lockdep_assert_held(&tl->mutex);
GEM_BUG_ON(!i915_request_completed(rq));
- if (list_empty(&rq->ring_link))
- return;
-
do {
- tmp = list_first_entry(&ring->request_list,
- typeof(*tmp), ring_link);
+ tmp = list_first_entry(&tl->requests, typeof(*tmp), link);
} while (i915_request_retire(tmp) && tmp != rq);
}
-static void irq_execute_cb(struct irq_work *wrk)
-{
- struct execute_cb *cb = container_of(wrk, typeof(*cb), work);
-
- i915_sw_fence_complete(cb->fence);
- kmem_cache_free(global.slab_execute_cbs, cb);
-}
-
-static void irq_execute_cb_hook(struct irq_work *wrk)
-{
- struct execute_cb *cb = container_of(wrk, typeof(*cb), work);
-
- cb->hook(container_of(cb->fence, struct i915_request, submit),
- &cb->signal->fence);
- i915_request_put(cb->signal);
-
- irq_execute_cb(wrk);
-}
-
-static void __notify_execute_cb(struct i915_request *rq)
-{
- struct execute_cb *cb;
-
- lockdep_assert_held(&rq->lock);
-
- if (list_empty(&rq->execute_cb))
- return;
-
- list_for_each_entry(cb, &rq->execute_cb, link)
- irq_work_queue(&cb->work);
-
- /*
- * XXX Rollback on __i915_request_unsubmit()
- *
- * In the future, perhaps when we have an active time-slicing scheduler,
- * it will be interesting to unsubmit parallel execution and remove
- * busywaits from the GPU until their master is restarted. This is
- * quite hairy, we have to carefully rollback the fence and do a
- * preempt-to-idle cycle on the target engine, all the while the
- * master execute_cb may refire.
- */
- INIT_LIST_HEAD(&rq->execute_cb);
-}
-
static int
__i915_request_await_execution(struct i915_request *rq,
struct i915_request *signal,
@@ -512,6 +496,10 @@ submit_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
switch (state) {
case FENCE_COMPLETE:
trace_i915_request_submit(request);
+
+ if (unlikely(fence->error))
+ i915_request_skip(request, fence->error);
+
/*
* We need to serialize use of the submit_request() callback
* with its hotplugging performed during an emergency
@@ -552,29 +540,28 @@ semaphore_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
return NOTIFY_DONE;
}
-static void ring_retire_requests(struct intel_ring *ring)
+static void retire_requests(struct intel_timeline *tl)
{
struct i915_request *rq, *rn;
- list_for_each_entry_safe(rq, rn, &ring->request_list, ring_link)
+ list_for_each_entry_safe(rq, rn, &tl->requests, link)
if (!i915_request_retire(rq))
break;
}
static noinline struct i915_request *
-request_alloc_slow(struct intel_context *ce, gfp_t gfp)
+request_alloc_slow(struct intel_timeline *tl, gfp_t gfp)
{
- struct intel_ring *ring = ce->ring;
struct i915_request *rq;
- if (list_empty(&ring->request_list))
+ if (list_empty(&tl->requests))
goto out;
if (!gfpflags_allow_blocking(gfp))
goto out;
/* Move our oldest request to the slab-cache (if not in use!) */
- rq = list_first_entry(&ring->request_list, typeof(*rq), ring_link);
+ rq = list_first_entry(&tl->requests, typeof(*rq), link);
i915_request_retire(rq);
rq = kmem_cache_alloc(global.slab_requests,
@@ -583,11 +570,11 @@ request_alloc_slow(struct intel_context *ce, gfp_t gfp)
return rq;
/* Ratelimit ourselves to prevent oom from malicious clients */
- rq = list_last_entry(&ring->request_list, typeof(*rq), ring_link);
+ rq = list_last_entry(&tl->requests, typeof(*rq), link);
cond_synchronize_rcu(rq->rcustate);
/* Retire our old requests in the hope that we free some */
- ring_retire_requests(ring);
+ retire_requests(tl);
out:
return kmem_cache_alloc(global.slab_requests, gfp);
@@ -596,7 +583,7 @@ out:
struct i915_request *
__i915_request_create(struct intel_context *ce, gfp_t gfp)
{
- struct i915_timeline *tl = ce->ring->timeline;
+ struct intel_timeline *tl = ce->timeline;
struct i915_request *rq;
u32 seqno;
int ret;
@@ -638,14 +625,14 @@ __i915_request_create(struct intel_context *ce, gfp_t gfp)
rq = kmem_cache_alloc(global.slab_requests,
gfp | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
if (unlikely(!rq)) {
- rq = request_alloc_slow(ce, gfp);
+ rq = request_alloc_slow(tl, gfp);
if (!rq) {
ret = -ENOMEM;
goto err_unreserve;
}
}
- ret = i915_timeline_get_seqno(tl, rq, &seqno);
+ ret = intel_timeline_get_seqno(tl, rq, &seqno);
if (ret)
goto err_free;
@@ -673,7 +660,7 @@ __i915_request_create(struct intel_context *ce, gfp_t gfp)
rq->file_priv = NULL;
rq->batch = NULL;
rq->capture_list = NULL;
- rq->waitboost = false;
+ rq->flags = 0;
rq->execution_mask = ALL_ENGINES;
INIT_LIST_HEAD(&rq->active_list);
@@ -730,15 +717,15 @@ struct i915_request *
i915_request_create(struct intel_context *ce)
{
struct i915_request *rq;
- int err;
+ struct intel_timeline *tl;
- err = intel_context_timeline_lock(ce);
- if (err)
- return ERR_PTR(err);
+ tl = intel_context_timeline_lock(ce);
+ if (IS_ERR(tl))
+ return ERR_CAST(tl);
/* Move our oldest request to the slab-cache (if not in use!) */
- rq = list_first_entry(&ce->ring->request_list, typeof(*rq), ring_link);
- if (!list_is_last(&rq->ring_link, &ce->ring->request_list))
+ rq = list_first_entry(&tl->requests, typeof(*rq), link);
+ if (!list_is_last(&rq->link, &tl->requests))
i915_request_retire(rq);
intel_context_enter(ce);
@@ -748,23 +735,23 @@ i915_request_create(struct intel_context *ce)
goto err_unlock;
/* Check that we do not interrupt ourselves with a new request */
- rq->cookie = lockdep_pin_lock(&ce->ring->timeline->mutex);
+ rq->cookie = lockdep_pin_lock(&tl->mutex);
return rq;
err_unlock:
- intel_context_timeline_unlock(ce);
+ intel_context_timeline_unlock(tl);
return rq;
}
static int
i915_request_await_start(struct i915_request *rq, struct i915_request *signal)
{
- if (list_is_first(&signal->ring_link, &signal->ring->request_list))
+ if (list_is_first(&signal->link, &signal->timeline->requests))
return 0;
- signal = list_prev_entry(signal, ring_link);
- if (i915_timeline_sync_is_later(rq->timeline, &signal->fence))
+ signal = list_prev_entry(signal, link);
+ if (intel_timeline_sync_is_later(rq->timeline, &signal->fence))
return 0;
return i915_sw_fence_await_dma_fence(&rq->submit,
@@ -818,7 +805,7 @@ emit_semaphore_wait(struct i915_request *to,
return err;
/* We need to pin the signaler's HWSP until we are finished reading. */
- err = i915_timeline_read_hwsp(from, to, &hwsp_offset);
+ err = intel_timeline_read_hwsp(from, to, &hwsp_offset);
if (err)
return err;
@@ -928,8 +915,8 @@ i915_request_await_dma_fence(struct i915_request *rq, struct dma_fence *fence)
continue;
/* Squash repeated waits to the same timelines */
- if (fence->context != rq->i915->mm.unordered_timeline &&
- i915_timeline_sync_is_later(rq->timeline, fence))
+ if (fence->context &&
+ intel_timeline_sync_is_later(rq->timeline, fence))
continue;
if (dma_fence_is_i915(fence))
@@ -942,8 +929,8 @@ i915_request_await_dma_fence(struct i915_request *rq, struct dma_fence *fence)
return ret;
/* Record the latest fence used against each timeline */
- if (fence->context != rq->i915->mm.unordered_timeline)
- i915_timeline_sync_set(rq->timeline, fence);
+ if (fence->context)
+ intel_timeline_sync_set(rq->timeline, fence);
} while (--nchild);
return 0;
@@ -1027,7 +1014,7 @@ i915_request_await_object(struct i915_request *to,
struct dma_fence **shared;
unsigned int count, i;
- ret = reservation_object_get_fences_rcu(obj->base.resv,
+ ret = dma_resv_get_fences_rcu(obj->base.resv,
&excl, &count, &shared);
if (ret)
return ret;
@@ -1044,7 +1031,7 @@ i915_request_await_object(struct i915_request *to,
dma_fence_put(shared[i]);
kfree(shared);
} else {
- excl = reservation_object_get_excl_rcu(obj->base.resv);
+ excl = dma_resv_get_excl_rcu(obj->base.resv);
}
if (excl) {
@@ -1065,6 +1052,9 @@ void i915_request_skip(struct i915_request *rq, int error)
GEM_BUG_ON(!IS_ERR_VALUE((long)error));
dma_fence_set_error(&rq->fence, error);
+ if (rq->infix == rq->postfix)
+ return;
+
/*
* As this request likely depends on state from the lost
* context, clear out all the user operations leaving the
@@ -1076,12 +1066,13 @@ void i915_request_skip(struct i915_request *rq, int error)
head = 0;
}
memset(vaddr + head, 0, rq->postfix - head);
+ rq->infix = rq->postfix;
}
static struct i915_request *
__i915_request_add_to_timeline(struct i915_request *rq)
{
- struct i915_timeline *timeline = rq->timeline;
+ struct intel_timeline *timeline = rq->timeline;
struct i915_request *prev;
/*
@@ -1104,7 +1095,8 @@ __i915_request_add_to_timeline(struct i915_request *rq)
* precludes optimising to use semaphores serialisation of a single
* timeline across engines.
*/
- prev = rcu_dereference_protected(timeline->last_request.request, 1);
+ prev = rcu_dereference_protected(timeline->last_request.request,
+ lockdep_is_held(&timeline->mutex));
if (prev && !i915_request_completed(prev)) {
if (is_power_of_2(prev->engine->mask | rq->engine->mask))
i915_sw_fence_await_sw_fence(&rq->submit,
@@ -1143,7 +1135,6 @@ struct i915_request *__i915_request_commit(struct i915_request *rq)
{
struct intel_engine_cs *engine = rq->engine;
struct intel_ring *ring = rq->ring;
- struct i915_request *prev;
u32 *cs;
GEM_TRACE("%s fence %llx:%lld\n",
@@ -1156,6 +1147,7 @@ struct i915_request *__i915_request_commit(struct i915_request *rq)
*/
GEM_BUG_ON(rq->reserved_space > ring->space);
rq->reserved_space = 0;
+ rq->emitted_jiffies = jiffies;
/*
* Record the position of the start of the breadcrumb so that
@@ -1167,13 +1159,12 @@ struct i915_request *__i915_request_commit(struct i915_request *rq)
GEM_BUG_ON(IS_ERR(cs));
rq->postfix = intel_ring_offset(rq, cs);
- prev = __i915_request_add_to_timeline(rq);
-
- list_add_tail(&rq->ring_link, &ring->request_list);
- if (list_is_first(&rq->ring_link, &ring->request_list))
- list_add(&ring->active_link, &rq->i915->gt.active_rings);
- rq->emitted_jiffies = jiffies;
+ return __i915_request_add_to_timeline(rq);
+}
+void __i915_request_queue(struct i915_request *rq,
+ const struct i915_sched_attr *attr)
+{
/*
* Let the backend know a new request has arrived that may need
* to adjust the existing execution schedule due to a high priority
@@ -1185,57 +1176,54 @@ struct i915_request *__i915_request_commit(struct i915_request *rq)
* decide whether to preempt the entire chain so that it is ready to
* run at the earliest possible convenience.
*/
- local_bh_disable();
i915_sw_fence_commit(&rq->semaphore);
- rcu_read_lock(); /* RCU serialisation for set-wedged protection */
- if (engine->schedule) {
- struct i915_sched_attr attr = rq->gem_context->sched;
-
- /*
- * Boost actual workloads past semaphores!
- *
- * With semaphores we spin on one engine waiting for another,
- * simply to reduce the latency of starting our work when
- * the signaler completes. However, if there is any other
- * work that we could be doing on this engine instead, that
- * is better utilisation and will reduce the overall duration
- * of the current work. To avoid PI boosting a semaphore
- * far in the distance past over useful work, we keep a history
- * of any semaphore use along our dependency chain.
- */
- if (!(rq->sched.flags & I915_SCHED_HAS_SEMAPHORE_CHAIN))
- attr.priority |= I915_PRIORITY_NOSEMAPHORE;
-
- /*
- * Boost priorities to new clients (new request flows).
- *
- * Allow interactive/synchronous clients to jump ahead of
- * the bulk clients. (FQ_CODEL)
- */
- if (list_empty(&rq->sched.signalers_list))
- attr.priority |= I915_PRIORITY_WAIT;
-
- engine->schedule(rq, &attr);
- }
- rcu_read_unlock();
+ if (attr && rq->engine->schedule)
+ rq->engine->schedule(rq, attr);
i915_sw_fence_commit(&rq->submit);
- local_bh_enable(); /* Kick the execlists tasklet if just scheduled */
-
- return prev;
}
void i915_request_add(struct i915_request *rq)
{
+ struct i915_sched_attr attr = rq->gem_context->sched;
+ struct intel_timeline * const tl = rq->timeline;
struct i915_request *prev;
- lockdep_assert_held(&rq->timeline->mutex);
- lockdep_unpin_lock(&rq->timeline->mutex, rq->cookie);
+ lockdep_assert_held(&tl->mutex);
+ lockdep_unpin_lock(&tl->mutex, rq->cookie);
trace_i915_request_add(rq);
prev = __i915_request_commit(rq);
/*
+ * Boost actual workloads past semaphores!
+ *
+ * With semaphores we spin on one engine waiting for another,
+ * simply to reduce the latency of starting our work when
+ * the signaler completes. However, if there is any other
+ * work that we could be doing on this engine instead, that
+ * is better utilisation and will reduce the overall duration
+ * of the current work. To avoid PI boosting a semaphore
+ * far in the distant past ahead of useful work, we keep a history
+ * of any semaphore use along our dependency chain.
+ */
+ if (!(rq->sched.flags & I915_SCHED_HAS_SEMAPHORE_CHAIN))
+ attr.priority |= I915_PRIORITY_NOSEMAPHORE;
+
+ /*
+ * Boost priorities to new clients (new request flows).
+ *
+ * Allow interactive/synchronous clients to jump ahead of
+ * the bulk clients. (FQ_CODEL)
+ */
+ if (list_empty(&rq->sched.signalers_list))
+ attr.priority |= I915_PRIORITY_WAIT;
+
+ local_bh_disable();
+ __i915_request_queue(rq, &attr);
+ local_bh_enable(); /* Kick the execlists tasklet if just scheduled */
+
+ /*
* In typical scenarios, we do not expect the previous request on
* the timeline to be still tracked by timeline->last_request if it
* has been completed. If the completed request is still here, that
@@ -1252,10 +1240,10 @@ void i915_request_add(struct i915_request *rq)
* work on behalf of others -- but instead we should benefit from
* improved resource management. (Well, that's the theory at least.)
*/
- if (prev && i915_request_completed(prev))
+ if (prev && i915_request_completed(prev) && prev->timeline == tl)
i915_request_retire_upto(prev);
- mutex_unlock(&rq->timeline->mutex);
+ mutex_unlock(&tl->mutex);
}
static unsigned long local_clock_us(unsigned int *cpu)
@@ -1390,8 +1378,7 @@ long i915_request_wait(struct i915_request *rq,
* serialise wait/reset with an explicit lock, we do want
* lockdep to detect potential dependency cycles.
*/
- mutex_acquire(&rq->i915->gpu_error.wedge_mutex.dep_map,
- 0, 0, _THIS_IP_);
+ mutex_acquire(&rq->engine->gt->reset.mutex.dep_map, 0, 0, _THIS_IP_);
/*
* Optimistic spin before touching IRQs.
@@ -1447,8 +1434,10 @@ long i915_request_wait(struct i915_request *rq,
for (;;) {
set_current_state(state);
- if (i915_request_completed(rq))
+ if (i915_request_completed(rq)) {
+ dma_fence_signal(&rq->fence);
break;
+ }
if (signal_pending_state(state, current)) {
timeout = -ERESTARTSYS;
@@ -1467,25 +1456,51 @@ long i915_request_wait(struct i915_request *rq,
dma_fence_remove_callback(&rq->fence, &wait.cb);
out:
- mutex_release(&rq->i915->gpu_error.wedge_mutex.dep_map, 0, _THIS_IP_);
+ mutex_release(&rq->engine->gt->reset.mutex.dep_map, 0, _THIS_IP_);
trace_i915_request_wait_end(rq);
return timeout;
}
bool i915_retire_requests(struct drm_i915_private *i915)
{
- struct intel_ring *ring, *tmp;
+ struct intel_gt_timelines *timelines = &i915->gt.timelines;
+ struct intel_timeline *tl, *tn;
+ unsigned long flags;
+ LIST_HEAD(free);
+
+ spin_lock_irqsave(&timelines->lock, flags);
+ list_for_each_entry_safe(tl, tn, &timelines->active_list, link) {
+ if (!mutex_trylock(&tl->mutex))
+ continue;
+
+ intel_timeline_get(tl);
+ GEM_BUG_ON(!tl->active_count);
+ tl->active_count++; /* pin the list element */
+ spin_unlock_irqrestore(&timelines->lock, flags);
- lockdep_assert_held(&i915->drm.struct_mutex);
+ retire_requests(tl);
- list_for_each_entry_safe(ring, tmp,
- &i915->gt.active_rings, active_link) {
- intel_ring_get(ring); /* last rq holds reference! */
- ring_retire_requests(ring);
- intel_ring_put(ring);
+ spin_lock_irqsave(&timelines->lock, flags);
+
+ /* Resume iteration after dropping lock */
+ list_safe_reset_next(tl, tn, link);
+ if (!--tl->active_count)
+ list_del(&tl->link);
+
+ mutex_unlock(&tl->mutex);
+
+ /* Defer the final release to after the spinlock */
+ if (refcount_dec_and_test(&tl->kref.refcount)) {
+ GEM_BUG_ON(tl->active_count);
+ list_add(&tl->link, &free);
+ }
}
+ spin_unlock_irqrestore(&timelines->lock, flags);
+
+ list_for_each_entry_safe(tl, tn, &free, link)
+ __intel_timeline_free(&tl->kref);
- return !list_empty(&i915->gt.active_rings);
+ return !list_empty(&timelines->active_list);
}
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
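[Editor's note: after this rework the request flow is bracketed by the per-timeline mutex rather than struct_mutex — i915_request_create() takes rq->timeline->mutex, and i915_request_add() commits, queues and unlocks. A hedged sketch of the typical caller pattern, with emission details elided:]

	/* Sketch: create/emit/add under the per-timeline lock. */
	rq = i915_request_create(ce);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	cs = intel_ring_begin(rq, 4);
	if (IS_ERR(cs)) {
		i915_request_add(rq);	/* must still unlock the timeline */
		return PTR_ERR(cs);
	}
	/* ... emit commands into cs ... */
	intel_ring_advance(rq, cs);

	i915_request_add(rq);		/* commit, queue, unlock tl->mutex */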
diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h
index edbbdfec24ab..8ac6e1226a56 100644
--- a/drivers/gpu/drm/i915/i915_request.h
+++ b/drivers/gpu/drm/i915/i915_request.h
@@ -28,6 +28,7 @@
#include <linux/dma-fence.h>
#include <linux/lockdep.h>
+#include "gt/intel_context_types.h"
#include "gt/intel_engine_types.h"
#include "i915_gem.h"
@@ -40,8 +41,8 @@
struct drm_file;
struct drm_i915_gem_object;
struct i915_request;
-struct i915_timeline;
-struct i915_timeline_cacheline;
+struct intel_timeline;
+struct intel_timeline_cacheline;
struct i915_capture_list {
struct i915_capture_list *next;
@@ -112,7 +113,7 @@ struct i915_request {
struct intel_engine_cs *engine;
struct intel_context *hw_context;
struct intel_ring *ring;
- struct i915_timeline *timeline;
+ struct intel_timeline *timeline;
struct list_head signal_link;
/*
@@ -175,7 +176,7 @@ struct i915_request {
* inside the timeline's HWSP vma, but it is only valid while this
* request has not completed and guarded by the timeline mutex.
*/
- struct i915_timeline_cacheline *hwsp_cacheline;
+ struct intel_timeline_cacheline *hwsp_cacheline;
/** Position in the ring of the start of the request */
u32 head;
@@ -215,14 +216,13 @@ struct i915_request {
/** Time at which this request was emitted, in jiffies. */
unsigned long emitted_jiffies;
- bool waitboost;
+ unsigned long flags;
+#define I915_REQUEST_WAITBOOST BIT(0)
+#define I915_REQUEST_NOPREEMPT BIT(1)
/** timeline->request entry for this request */
struct list_head link;
- /** ring->request_list entry for this request */
- struct list_head ring_link;
-
struct drm_i915_file_private *file_priv;
/** file_priv list entry for this request */
struct list_head client_link;
@@ -248,6 +248,8 @@ struct i915_request * __must_check
i915_request_create(struct intel_context *ce);
struct i915_request *__i915_request_commit(struct i915_request *request);
+void __i915_request_queue(struct i915_request *rq,
+ const struct i915_sched_attr *attr);
void i915_request_retire_upto(struct i915_request *rq);
@@ -429,6 +431,17 @@ static inline void i915_request_mark_complete(struct i915_request *rq)
rq->hwsp_seqno = (u32 *)&rq->fence.seqno; /* decouple from HWSP */
}
+static inline bool i915_request_has_waitboost(const struct i915_request *rq)
+{
+ return rq->flags & I915_REQUEST_WAITBOOST;
+}
+
+static inline bool i915_request_has_nopreempt(const struct i915_request *rq)
+{
+ /* Preemption should only be disabled very rarely */
+ return unlikely(rq->flags & I915_REQUEST_NOPREEMPT);
+}
+
bool i915_retire_requests(struct drm_i915_private *i915);
#endif /* I915_REQUEST_H */
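[Editor's note: rq->waitboost becomes one bit in a small per-request flags word, read through inline helpers so later flags such as NOPREEMPT follow the same pattern. A short sketch of the idiom, mirroring the retire path shown earlier in this diff:]

	/* Sketch: set at boost time, tested via the helper at retire. */
	rq->flags |= I915_REQUEST_WAITBOOST;

	if (i915_request_has_waitboost(rq))
		atomic_dec(&rq->i915->gt_pm.rps.num_waiters);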
diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c
index 2e9b38bdc33c..7b84ebca2901 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.c
+++ b/drivers/gpu/drm/i915/i915_scheduler.c
@@ -179,8 +179,7 @@ static inline int rq_prio(const struct i915_request *rq)
static void kick_submission(struct intel_engine_cs *engine, int prio)
{
- const struct i915_request *inflight =
- port_request(engine->execlists.port);
+ const struct i915_request *inflight = *engine->execlists.active;
/*
* If we are already the currently executing context, don't
@@ -350,8 +349,7 @@ void i915_schedule_bump_priority(struct i915_request *rq, unsigned int bump)
unsigned long flags;
GEM_BUG_ON(bump & ~I915_PRIORITY_MASK);
-
- if (READ_ONCE(rq->sched.attr.priority) == I915_PRIORITY_INVALID)
+ if (READ_ONCE(rq->sched.attr.priority) & bump)
return;
spin_lock_irqsave(&schedule_lock, flags);
@@ -395,6 +393,7 @@ bool __i915_sched_node_add_dependency(struct i915_sched_node *node,
list_add(&dep->wait_link, &signal->waiters_list);
list_add(&dep->signal_link, &node->signalers_list);
dep->signaler = signal;
+ dep->waiter = node;
dep->flags = flags;
/* Keep track of whether anyone on this chain has a semaphore */
diff --git a/drivers/gpu/drm/i915/i915_scheduler_types.h b/drivers/gpu/drm/i915/i915_scheduler_types.h
index 3e309631bd0b..aad81acba9dc 100644
--- a/drivers/gpu/drm/i915/i915_scheduler_types.h
+++ b/drivers/gpu/drm/i915/i915_scheduler_types.h
@@ -62,6 +62,7 @@ struct i915_sched_node {
struct i915_dependency {
struct i915_sched_node *signaler;
+ struct i915_sched_node *waiter;
struct list_head signal_link;
struct list_head wait_link;
struct list_head dfs_link;
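[Editor's note: the new dep->waiter back-pointer makes each i915_dependency edge traversable in both directions — from a waiter along signalers_list as before, and now from a signaler to every node waiting on it. A hedged sketch of the latter walk; the callback is illustrative:]

	/* Sketch: visit every scheduling node that waits on 'node'. */
	struct i915_dependency *dep;

	list_for_each_entry(dep, &node->waiters_list, wait_link)
		update_priority(dep->waiter);	/* illustrative callback */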
diff --git a/drivers/gpu/drm/i915/i915_selftest.h b/drivers/gpu/drm/i915/i915_selftest.h
index 207e21b478f2..4d88205de51b 100644
--- a/drivers/gpu/drm/i915/i915_selftest.h
+++ b/drivers/gpu/drm/i915/i915_selftest.h
@@ -24,6 +24,8 @@
#ifndef __I915_SELFTEST_H__
#define __I915_SELFTEST_H__
+#include <linux/types.h>
+
struct pci_dev;
struct drm_i915_private;
@@ -66,12 +68,37 @@ struct i915_subtest {
const char *name;
};
+int __i915_nop_setup(void *data);
+int __i915_nop_teardown(int err, void *data);
+
+int __i915_live_setup(void *data);
+int __i915_live_teardown(int err, void *data);
+
+int __intel_gt_live_setup(void *data);
+int __intel_gt_live_teardown(int err, void *data);
+
int __i915_subtests(const char *caller,
+ int (*setup)(void *data),
+ int (*teardown)(int err, void *data),
const struct i915_subtest *st,
unsigned int count,
void *data);
#define i915_subtests(T, data) \
- __i915_subtests(__func__, T, ARRAY_SIZE(T), data)
+ __i915_subtests(__func__, \
+ __i915_nop_setup, __i915_nop_teardown, \
+ T, ARRAY_SIZE(T), data)
+#define i915_live_subtests(T, data) ({ \
+ typecheck(struct drm_i915_private *, data); \
+ __i915_subtests(__func__, \
+ __i915_live_setup, __i915_live_teardown, \
+ T, ARRAY_SIZE(T), data); \
+})
+#define intel_gt_live_subtests(T, data) ({ \
+ typecheck(struct intel_gt *, data); \
+ __i915_subtests(__func__, \
+ __intel_gt_live_setup, __intel_gt_live_teardown, \
+ T, ARRAY_SIZE(T), data); \
+})
#define SUBTEST(x) { x, #x }
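[Editor's note: the setup/teardown hooks let live selftests share common bring-up around every subtest table, while the typecheck() in the new macros catches passing the wrong container at compile time. A sketch of how a live suite would use them; the table and function names are illustrative:]

	/* Sketch: a live selftest entry point using the new wrapper. */
	static const struct i915_subtest tests[] = {
		SUBTEST(live_sanitycheck),
	};

	int i915_foo_live_selftests(struct drm_i915_private *i915)
	{
		return i915_live_subtests(tests, i915); /* typechecked */
	}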
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index a08d7d16621b..8508a01ad8b9 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -29,8 +29,9 @@
#include "display/intel_fbc.h"
#include "display/intel_gmbus.h"
+#include "i915_drv.h"
#include "i915_reg.h"
-#include "intel_drv.h"
+#include "i915_suspend.h"
static void i915_save_display(struct drm_i915_private *dev_priv)
{
diff --git a/drivers/gpu/drm/i915/i915_suspend.h b/drivers/gpu/drm/i915/i915_suspend.h
new file mode 100644
index 000000000000..3a36fb4ecc05
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_suspend.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __I915_SUSPEND_H__
+#define __I915_SUSPEND_H__
+
+struct drm_i915_private;
+
+int i915_save_state(struct drm_i915_private *i915);
+int i915_restore_state(struct drm_i915_private *i915);
+
+#endif /* __I915_SUSPEND_H__ */
diff --git a/drivers/gpu/drm/i915/i915_sw_fence.c b/drivers/gpu/drm/i915/i915_sw_fence.c
index 5387aafd3424..6a88db291252 100644
--- a/drivers/gpu/drm/i915/i915_sw_fence.c
+++ b/drivers/gpu/drm/i915/i915_sw_fence.c
@@ -7,7 +7,7 @@
#include <linux/slab.h>
#include <linux/dma-fence.h>
#include <linux/irq_work.h>
-#include <linux/reservation.h>
+#include <linux/dma-resv.h>
#include "i915_sw_fence.h"
#include "i915_selftest.h"
@@ -157,8 +157,11 @@ static void __i915_sw_fence_wake_up_all(struct i915_sw_fence *fence,
LIST_HEAD(extra);
do {
- list_for_each_entry_safe(pos, next, &x->head, entry)
- pos->func(pos, TASK_NORMAL, 0, &extra);
+ list_for_each_entry_safe(pos, next, &x->head, entry) {
+ pos->func(pos,
+ TASK_NORMAL, fence->error,
+ &extra);
+ }
if (list_empty(&extra))
break;
@@ -219,6 +222,8 @@ void __i915_sw_fence_init(struct i915_sw_fence *fence,
__init_waitqueue_head(&fence->wait, name, key);
atomic_set(&fence->pending, 1);
+ fence->error = 0;
+
fence->flags = (unsigned long)fn;
}
@@ -230,6 +235,8 @@ void i915_sw_fence_commit(struct i915_sw_fence *fence)
static int i915_sw_fence_wake(wait_queue_entry_t *wq, unsigned mode, int flags, void *key)
{
+ i915_sw_fence_set_error_once(wq->private, flags);
+
list_del(&wq->entry);
__i915_sw_fence_complete(wq->private, key);
@@ -302,8 +309,10 @@ static int __i915_sw_fence_await_sw_fence(struct i915_sw_fence *fence,
debug_fence_assert(fence);
might_sleep_if(gfpflags_allow_blocking(gfp));
- if (i915_sw_fence_done(signaler))
+ if (i915_sw_fence_done(signaler)) {
+ i915_sw_fence_set_error_once(fence, signaler->error);
return 0;
+ }
debug_fence_assert(signaler);
@@ -319,6 +328,7 @@ static int __i915_sw_fence_await_sw_fence(struct i915_sw_fence *fence,
return -ENOMEM;
i915_sw_fence_wait(signaler);
+ i915_sw_fence_set_error_once(fence, signaler->error);
return 0;
}
@@ -337,7 +347,7 @@ static int __i915_sw_fence_await_sw_fence(struct i915_sw_fence *fence,
__add_wait_queue_entry_tail(&signaler->wait, wq);
pending = 1;
} else {
- i915_sw_fence_wake(wq, 0, 0, NULL);
+ i915_sw_fence_wake(wq, 0, signaler->error, NULL);
pending = 0;
}
spin_unlock_irqrestore(&signaler->wait.lock, flags);
@@ -372,6 +382,7 @@ static void dma_i915_sw_fence_wake(struct dma_fence *dma,
{
struct i915_sw_dma_fence_cb *cb = container_of(data, typeof(*cb), base);
+ i915_sw_fence_set_error_once(cb->fence, dma->error);
i915_sw_fence_complete(cb->fence);
kfree(cb);
}
@@ -391,6 +402,7 @@ static void timer_i915_sw_fence_wake(struct timer_list *t)
cb->dma->seqno,
i915_sw_fence_debug_hint(fence));
+ i915_sw_fence_set_error_once(fence, -ETIMEDOUT);
i915_sw_fence_complete(fence);
}
@@ -480,6 +492,7 @@ static void __dma_i915_sw_fence_wake(struct dma_fence *dma,
{
struct i915_sw_dma_fence_cb *cb = container_of(data, typeof(*cb), base);
+ i915_sw_fence_set_error_once(cb->fence, dma->error);
i915_sw_fence_complete(cb->fence);
}
@@ -501,7 +514,7 @@ int __i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence,
if (ret == 0) {
ret = 1;
} else {
- i915_sw_fence_complete(fence);
+ __dma_i915_sw_fence_wake(dma, &cb->base);
if (ret == -ENOENT) /* fence already signaled */
ret = 0;
}
@@ -510,7 +523,7 @@ int __i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence,
}
int i915_sw_fence_await_reservation(struct i915_sw_fence *fence,
- struct reservation_object *resv,
+ struct dma_resv *resv,
const struct dma_fence_ops *exclude,
bool write,
unsigned long timeout,
@@ -526,7 +539,7 @@ int i915_sw_fence_await_reservation(struct i915_sw_fence *fence,
struct dma_fence **shared;
unsigned int count, i;
- ret = reservation_object_get_fences_rcu(resv,
+ ret = dma_resv_get_fences_rcu(resv,
&excl, &count, &shared);
if (ret)
return ret;
@@ -551,7 +564,7 @@ int i915_sw_fence_await_reservation(struct i915_sw_fence *fence,
dma_fence_put(shared[i]);
kfree(shared);
} else {
- excl = reservation_object_get_excl_rcu(resv);
+ excl = dma_resv_get_excl_rcu(resv);
}
if (ret >= 0 && excl && excl->ops != exclude) {
diff --git a/drivers/gpu/drm/i915/i915_sw_fence.h b/drivers/gpu/drm/i915/i915_sw_fence.h
index 9cb5c3b307a6..ab7d58bd0b9d 100644
--- a/drivers/gpu/drm/i915/i915_sw_fence.h
+++ b/drivers/gpu/drm/i915/i915_sw_fence.h
@@ -16,12 +16,13 @@
#include <linux/wait.h>
struct completion;
-struct reservation_object;
+struct dma_resv;
struct i915_sw_fence {
wait_queue_head_t wait;
unsigned long flags;
atomic_t pending;
+ int error;
};
#define I915_SW_FENCE_CHECKED_BIT 0 /* used internally for DAG checking */
@@ -82,7 +83,7 @@ int i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence,
gfp_t gfp);
int i915_sw_fence_await_reservation(struct i915_sw_fence *fence,
- struct reservation_object *resv,
+ struct dma_resv *resv,
const struct dma_fence_ops *exclude,
bool write,
unsigned long timeout,
@@ -106,4 +107,10 @@ static inline void i915_sw_fence_wait(struct i915_sw_fence *fence)
wait_event(fence->wait, i915_sw_fence_done(fence));
}
+static inline void
+i915_sw_fence_set_error_once(struct i915_sw_fence *fence, int error)
+{
+ cmpxchg(&fence->error, 0, error);
+}
+
#endif /* _I915_SW_FENCE_H_ */
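[Editor's note: i915_sw_fence_set_error_once() relies on cmpxchg() swapping only while fence->error is still zero, so the first reported error sticks and every later report is a no-op; callers can therefore report unconditionally without checking for a prior error:]

	/* Sketch: the first error wins, later ones are ignored. */
	i915_sw_fence_set_error_once(fence, -EIO);
	i915_sw_fence_set_error_once(fence, -ETIMEDOUT);	/* no-op */
	/* fence->error is now -EIO */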
diff --git a/drivers/gpu/drm/i915/i915_sw_fence_work.c b/drivers/gpu/drm/i915/i915_sw_fence_work.c
new file mode 100644
index 000000000000..07552cd544f2
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_sw_fence_work.c
@@ -0,0 +1,95 @@
+// SPDX-License-Identifier: MIT
+
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include "i915_sw_fence_work.h"
+
+static void fence_work(struct work_struct *work)
+{
+ struct dma_fence_work *f = container_of(work, typeof(*f), work);
+ int err;
+
+ err = f->ops->work(f);
+ if (err)
+ dma_fence_set_error(&f->dma, err);
+ dma_fence_signal(&f->dma);
+ dma_fence_put(&f->dma);
+}
+
+static int __i915_sw_fence_call
+fence_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
+{
+ struct dma_fence_work *f = container_of(fence, typeof(*f), chain);
+
+ switch (state) {
+ case FENCE_COMPLETE:
+ if (fence->error)
+ dma_fence_set_error(&f->dma, fence->error);
+
+ if (!f->dma.error) {
+ dma_fence_get(&f->dma);
+ queue_work(system_unbound_wq, &f->work);
+ } else {
+ dma_fence_signal(&f->dma);
+ }
+ break;
+
+ case FENCE_FREE:
+ dma_fence_put(&f->dma);
+ break;
+ }
+
+ return NOTIFY_DONE;
+}
+
+static const char *get_driver_name(struct dma_fence *fence)
+{
+ return "dma-fence";
+}
+
+static const char *get_timeline_name(struct dma_fence *fence)
+{
+ struct dma_fence_work *f = container_of(fence, typeof(*f), dma);
+
+ return f->ops->name ?: "work";
+}
+
+static void fence_release(struct dma_fence *fence)
+{
+ struct dma_fence_work *f = container_of(fence, typeof(*f), dma);
+
+ if (f->ops->release)
+ f->ops->release(f);
+
+ i915_sw_fence_fini(&f->chain);
+
+ BUILD_BUG_ON(offsetof(typeof(*f), dma));
+ dma_fence_free(&f->dma);
+}
+
+static const struct dma_fence_ops fence_ops = {
+ .get_driver_name = get_driver_name,
+ .get_timeline_name = get_timeline_name,
+ .release = fence_release,
+};
+
+void dma_fence_work_init(struct dma_fence_work *f,
+ const struct dma_fence_work_ops *ops)
+{
+ spin_lock_init(&f->lock);
+ dma_fence_init(&f->dma, &fence_ops, &f->lock, 0, 0);
+ i915_sw_fence_init(&f->chain, fence_notify);
+ INIT_WORK(&f->work, fence_work);
+
+ f->ops = ops;
+}
+
+int dma_fence_work_chain(struct dma_fence_work *f, struct dma_fence *signal)
+{
+ if (!signal)
+ return 0;
+
+ return __i915_sw_fence_await_dma_fence(&f->chain, signal, &f->cb);
+}
diff --git a/drivers/gpu/drm/i915/i915_sw_fence_work.h b/drivers/gpu/drm/i915/i915_sw_fence_work.h
new file mode 100644
index 000000000000..3a22b287e201
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_sw_fence_work.h
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: MIT */
+
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef I915_SW_FENCE_WORK_H
+#define I915_SW_FENCE_WORK_H
+
+#include <linux/dma-fence.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+
+#include "i915_sw_fence.h"
+
+struct dma_fence_work;
+
+struct dma_fence_work_ops {
+ const char *name;
+ int (*work)(struct dma_fence_work *f);
+ void (*release)(struct dma_fence_work *f);
+};
+
+struct dma_fence_work {
+ struct dma_fence dma;
+ spinlock_t lock;
+
+ struct i915_sw_fence chain;
+ struct i915_sw_dma_fence_cb cb;
+
+ struct work_struct work;
+ const struct dma_fence_work_ops *ops;
+};
+
+void dma_fence_work_init(struct dma_fence_work *f,
+ const struct dma_fence_work_ops *ops);
+int dma_fence_work_chain(struct dma_fence_work *f, struct dma_fence *signal);
+
+static inline void dma_fence_work_commit(struct dma_fence_work *f)
+{
+ i915_sw_fence_commit(&f->chain);
+}
+
+#endif /* I915_SW_FENCE_WORK_H */
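[Editor's note: dma_fence_work packages the common pattern of running a work item once its prerequisite fences have signalled, exposing the result as a dma_fence. A hedged usage sketch — clear_work_fn, clear_ops, 'w' (an object embedding struct dma_fence_work as 'base') and 'prior' are all illustrative:]

	/* Sketch: run clear_work_fn() after 'prior' signals. */
	static int clear_work_fn(struct dma_fence_work *f)
	{
		return 0;	/* executed from system_unbound_wq */
	}

	static const struct dma_fence_work_ops clear_ops = {
		.name = "clear",
		.work = clear_work_fn,
	};

	dma_fence_work_init(&w->base, &clear_ops);
	err = dma_fence_work_chain(&w->base, prior);	/* 1 pending, 0 done, <0 error */
	if (err >= 0)
		dma_fence_work_commit(&w->base);	/* arm the chain */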
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index ecac1c386109..d8a3b180c084 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -31,7 +31,7 @@
#include <linux/sysfs.h>
#include "i915_drv.h"
-#include "intel_drv.h"
+#include "i915_sysfs.h"
#include "intel_pm.h"
#include "intel_sideband.h"
diff --git a/drivers/gpu/drm/i915/i915_sysfs.h b/drivers/gpu/drm/i915/i915_sysfs.h
new file mode 100644
index 000000000000..41afd4366416
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_sysfs.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __I915_SYSFS_H__
+#define __I915_SYSFS_H__
+
+struct drm_i915_private;
+
+void i915_setup_sysfs(struct drm_i915_private *i915);
+void i915_teardown_sysfs(struct drm_i915_private *i915);
+
+#endif /* __I915_SYSFS_H__ */
diff --git a/drivers/gpu/drm/i915/i915_timeline.h b/drivers/gpu/drm/i915/i915_timeline.h
deleted file mode 100644
index 36e5e5a65155..000000000000
--- a/drivers/gpu/drm/i915/i915_timeline.h
+++ /dev/null
@@ -1,94 +0,0 @@
-/*
- * Copyright © 2016 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
- */
-
-#ifndef I915_TIMELINE_H
-#define I915_TIMELINE_H
-
-#include <linux/lockdep.h>
-
-#include "i915_active.h"
-#include "i915_syncmap.h"
-#include "i915_timeline_types.h"
-
-int i915_timeline_init(struct drm_i915_private *i915,
- struct i915_timeline *tl,
- struct i915_vma *hwsp);
-void i915_timeline_fini(struct i915_timeline *tl);
-
-struct i915_timeline *
-i915_timeline_create(struct drm_i915_private *i915,
- struct i915_vma *global_hwsp);
-
-static inline struct i915_timeline *
-i915_timeline_get(struct i915_timeline *timeline)
-{
- kref_get(&timeline->kref);
- return timeline;
-}
-
-void __i915_timeline_free(struct kref *kref);
-static inline void i915_timeline_put(struct i915_timeline *timeline)
-{
- kref_put(&timeline->kref, __i915_timeline_free);
-}
-
-static inline int __i915_timeline_sync_set(struct i915_timeline *tl,
- u64 context, u32 seqno)
-{
- return i915_syncmap_set(&tl->sync, context, seqno);
-}
-
-static inline int i915_timeline_sync_set(struct i915_timeline *tl,
- const struct dma_fence *fence)
-{
- return __i915_timeline_sync_set(tl, fence->context, fence->seqno);
-}
-
-static inline bool __i915_timeline_sync_is_later(struct i915_timeline *tl,
- u64 context, u32 seqno)
-{
- return i915_syncmap_is_later(&tl->sync, context, seqno);
-}
-
-static inline bool i915_timeline_sync_is_later(struct i915_timeline *tl,
- const struct dma_fence *fence)
-{
- return __i915_timeline_sync_is_later(tl, fence->context, fence->seqno);
-}
-
-int i915_timeline_pin(struct i915_timeline *tl);
-int i915_timeline_get_seqno(struct i915_timeline *tl,
- struct i915_request *rq,
- u32 *seqno);
-void i915_timeline_unpin(struct i915_timeline *tl);
-
-int i915_timeline_read_hwsp(struct i915_request *from,
- struct i915_request *until,
- u32 *hwsp_offset);
-
-void i915_timelines_init(struct drm_i915_private *i915);
-void i915_timelines_park(struct drm_i915_private *i915);
-void i915_timelines_fini(struct drm_i915_private *i915);
-
-#endif
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index cce426b23a24..24f2944da09d 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -8,11 +8,11 @@
#include <drm/drm_drv.h>
+#include "display/intel_display_types.h"
#include "gt/intel_engine.h"
#include "i915_drv.h"
#include "i915_irq.h"
-#include "intel_drv.h"
#undef TRACE_SYSTEM
#define TRACE_SYSTEM i915
@@ -293,16 +293,16 @@ TRACE_EVENT(intel_update_plane,
TP_STRUCT__entry(
__field(enum pipe, pipe)
- __field(const char *, name)
__field(u32, frame)
__field(u32, scanline)
__array(int, src, 4)
__array(int, dst, 4)
+ __string(name, plane->name)
),
TP_fast_assign(
+ __assign_str(name, plane->name);
__entry->pipe = crtc->pipe;
- __entry->name = plane->name;
__entry->frame = intel_crtc_get_vblank_counter(crtc);
__entry->scanline = intel_get_crtc_scanline(crtc);
memcpy(__entry->src, &plane->state->src, sizeof(__entry->src));
@@ -310,7 +310,7 @@ TRACE_EVENT(intel_update_plane,
),
TP_printk("pipe %c, plane %s, frame=%u, scanline=%u, " DRM_RECT_FP_FMT " -> " DRM_RECT_FMT,
- pipe_name(__entry->pipe), __entry->name,
+ pipe_name(__entry->pipe), __get_str(name),
__entry->frame, __entry->scanline,
DRM_RECT_FP_ARG((const struct drm_rect *)__entry->src),
DRM_RECT_ARG((const struct drm_rect *)__entry->dst))
@@ -322,20 +322,20 @@ TRACE_EVENT(intel_disable_plane,
TP_STRUCT__entry(
__field(enum pipe, pipe)
- __field(const char *, name)
__field(u32, frame)
__field(u32, scanline)
+ __string(name, plane->name)
),
TP_fast_assign(
+ __assign_str(name, plane->name);
__entry->pipe = crtc->pipe;
- __entry->name = plane->name;
__entry->frame = intel_crtc_get_vblank_counter(crtc);
__entry->scanline = intel_get_crtc_scanline(crtc);
),
TP_printk("pipe %c, plane %s, frame=%u, scanline=%u",
- pipe_name(__entry->pipe), __entry->name,
+ pipe_name(__entry->pipe), __get_str(name),
__entry->frame, __entry->scanline)
);
@@ -677,7 +677,7 @@ TRACE_EVENT(i915_request_queue,
__entry->dev = rq->i915->drm.primary->index;
__entry->hw_id = rq->gem_context->hw_id;
__entry->class = rq->engine->uabi_class;
- __entry->instance = rq->engine->instance;
+ __entry->instance = rq->engine->uabi_instance;
__entry->ctx = rq->fence.context;
__entry->seqno = rq->fence.seqno;
__entry->flags = flags;
@@ -706,7 +706,7 @@ DECLARE_EVENT_CLASS(i915_request,
__entry->dev = rq->i915->drm.primary->index;
__entry->hw_id = rq->gem_context->hw_id;
__entry->class = rq->engine->uabi_class;
- __entry->instance = rq->engine->instance;
+ __entry->instance = rq->engine->uabi_instance;
__entry->ctx = rq->fence.context;
__entry->seqno = rq->fence.seqno;
),
@@ -751,7 +751,7 @@ TRACE_EVENT(i915_request_in,
__entry->dev = rq->i915->drm.primary->index;
__entry->hw_id = rq->gem_context->hw_id;
__entry->class = rq->engine->uabi_class;
- __entry->instance = rq->engine->instance;
+ __entry->instance = rq->engine->uabi_instance;
__entry->ctx = rq->fence.context;
__entry->seqno = rq->fence.seqno;
__entry->prio = rq->sched.attr.priority;
@@ -782,7 +782,7 @@ TRACE_EVENT(i915_request_out,
__entry->dev = rq->i915->drm.primary->index;
__entry->hw_id = rq->gem_context->hw_id;
__entry->class = rq->engine->uabi_class;
- __entry->instance = rq->engine->instance;
+ __entry->instance = rq->engine->uabi_instance;
__entry->ctx = rq->fence.context;
__entry->seqno = rq->fence.seqno;
__entry->completed = i915_request_completed(rq);
@@ -847,7 +847,7 @@ TRACE_EVENT(i915_request_wait_begin,
__entry->dev = rq->i915->drm.primary->index;
__entry->hw_id = rq->gem_context->hw_id;
__entry->class = rq->engine->uabi_class;
- __entry->instance = rq->engine->instance;
+ __entry->instance = rq->engine->uabi_instance;
__entry->ctx = rq->fence.context;
__entry->seqno = rq->fence.seqno;
__entry->flags = flags;
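[Editor's note: the switch from __field(const char *, name) to __string()/__assign_str() above copies the plane name into the trace ring buffer at event time, so the record stays readable even after the plane (and the memory backing plane->name) is freed. A minimal sketch of the pattern; my_plane_event is an illustrative name and the usual TRACE_EVENT boilerplate (TRACE_SYSTEM, CREATE_TRACE_POINTS, <drm/drm_plane.h>) is assumed.]

TRACE_EVENT(my_plane_event,
	    TP_PROTO(struct drm_plane *plane),
	    TP_ARGS(plane),

	    TP_STRUCT__entry(
			     __string(name, plane->name)	/* sized copy */
			     ),

	    TP_fast_assign(
			   __assign_str(name, plane->name);	/* snapshot */
			   ),

	    TP_printk("plane %s", __get_str(name))
);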
diff --git a/drivers/gpu/drm/i915/i915_utils.c b/drivers/gpu/drm/i915/i915_utils.c
new file mode 100644
index 000000000000..16acdf7bdbe6
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_utils.c
@@ -0,0 +1,78 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include <drm/drm_drv.h>
+
+#include "i915_drv.h"
+#include "i915_utils.h"
+
+#define FDO_BUG_URL "https://bugs.freedesktop.org/enter_bug.cgi?product=DRI"
+#define FDO_BUG_MSG "Please file a bug at " FDO_BUG_URL " against DRM/Intel " \
+ "providing the dmesg log by booting with drm.debug=0xf"
+
+void
+__i915_printk(struct drm_i915_private *dev_priv, const char *level,
+ const char *fmt, ...)
+{
+ static bool shown_bug_once;
+ struct device *kdev = dev_priv->drm.dev;
+ bool is_error = level[1] <= KERN_ERR[1];
+ bool is_debug = level[1] == KERN_DEBUG[1];
+ struct va_format vaf;
+ va_list args;
+
+ if (is_debug && !(drm_debug & DRM_UT_DRIVER))
+ return;
+
+ va_start(args, fmt);
+
+ vaf.fmt = fmt;
+ vaf.va = &args;
+
+ if (is_error)
+ dev_printk(level, kdev, "%pV", &vaf);
+ else
+ dev_printk(level, kdev, "[" DRM_NAME ":%ps] %pV",
+ __builtin_return_address(0), &vaf);
+
+ va_end(args);
+
+ if (is_error && !shown_bug_once) {
+ /*
+ * Ask the user to file a bug report for the error, except
+ * if they may have caused the bug by fiddling with unsafe
+ * module parameters.
+ */
+ if (!test_taint(TAINT_USER))
+ dev_notice(kdev, "%s", FDO_BUG_MSG);
+ shown_bug_once = true;
+ }
+}
+
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG)
+static unsigned int i915_probe_fail_count;
+
+int __i915_inject_load_error(struct drm_i915_private *i915, int err,
+ const char *func, int line)
+{
+ if (i915_probe_fail_count >= i915_modparams.inject_load_failure)
+ return 0;
+
+ if (++i915_probe_fail_count < i915_modparams.inject_load_failure)
+ return 0;
+
+ __i915_printk(i915, KERN_INFO,
+ "Injecting failure %d at checkpoint %u [%s:%d]\n",
+ err, i915_modparams.inject_load_failure, func, line);
+ i915_modparams.inject_load_failure = 0;
+ return err;
+}
+
+bool i915_error_injected(void)
+{
+ return i915_probe_fail_count && !i915_modparams.inject_load_failure;
+}
+
+#endif
diff --git a/drivers/gpu/drm/i915/i915_utils.h b/drivers/gpu/drm/i915/i915_utils.h
index 2987219a6300..562f756da421 100644
--- a/drivers/gpu/drm/i915/i915_utils.h
+++ b/drivers/gpu/drm/i915/i915_utils.h
@@ -31,6 +31,8 @@
#include <linux/types.h>
#include <linux/workqueue.h>
+struct drm_i915_private;
+
#undef WARN_ON
/* Many gcc versions seem not to see through this and fall over :( */
#if 0
@@ -49,6 +51,34 @@
#define MISSING_CASE(x) WARN(1, "Missing case (%s == %ld)\n", \
__stringify(x), (long)(x))
+void __printf(3, 4)
+__i915_printk(struct drm_i915_private *dev_priv, const char *level,
+ const char *fmt, ...);
+
+#define i915_report_error(dev_priv, fmt, ...) \
+ __i915_printk(dev_priv, KERN_ERR, fmt, ##__VA_ARGS__)
+
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG)
+
+int __i915_inject_load_error(struct drm_i915_private *i915, int err,
+ const char *func, int line);
+#define i915_inject_load_error(_i915, _err) \
+ __i915_inject_load_error((_i915), (_err), __func__, __LINE__)
+bool i915_error_injected(void);
+
+#else
+
+#define i915_inject_load_error(_i915, _err) 0
+#define i915_error_injected() false
+
+#endif
+
+#define i915_inject_probe_failure(i915) i915_inject_load_error((i915), -ENODEV)
+
+#define i915_probe_error(i915, fmt, ...) \
+ __i915_printk(i915, i915_error_injected() ? KERN_DEBUG : KERN_ERR, \
+ fmt, ##__VA_ARGS__)
+
#if defined(GCC_VERSION) && GCC_VERSION >= 70000
#define add_overflows_t(T, A, B) \
__builtin_add_overflow_p((A), (B), (T)0)
@@ -131,6 +161,16 @@ __check_struct_size(size_t base, size_t arr, size_t count, size_t *size)
((typeof(ptr))((unsigned long)(ptr) | __bits)); \
})
+#define ptr_dec(ptr) ({ \
+ unsigned long __v = (unsigned long)(ptr); \
+ (typeof(ptr))(__v - 1); \
+})
+
+#define ptr_inc(ptr) ({ \
+ unsigned long __v = (unsigned long)(ptr); \
+ (typeof(ptr))(__v + 1); \
+})
+
#define page_mask_bits(ptr) ptr_mask_bits(ptr, PAGE_SHIFT)
#define page_unmask_bits(ptr) ptr_unmask_bits(ptr, PAGE_SHIFT)
#define page_pack_bits(ptr, bits) ptr_pack_bits(ptr, bits, PAGE_SHIFT)
@@ -370,4 +410,15 @@ static inline const char *enableddisabled(bool v)
return v ? "enabled" : "disabled";
}
+static inline void add_taint_for_CI(unsigned int taint)
+{
+ /*
+ * The system is "ok", just about surviving for the user, but
+ * CI results are now unreliable as the HW is very suspect.
+ * CI checks the taint state after every test and will reboot
+ * the machine if the kernel is tainted.
+ */
+ add_taint(taint, LOCKDEP_STILL_OK);
+}
+
#endif /* !__I915_UTILS_H */
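[Editor's note: a hedged sketch of how the probe-time injection hooks above are meant to be used; my_subsys_init and my_hw_setup are illustrative names. With CONFIG_DRM_I915_DEBUG and the inject_load_failure module parameter set, the Nth checkpoint fails with the given errno, and i915_probe_error() demotes the resulting message from KERN_ERR to KERN_DEBUG so injected failures don't pollute CI logs.]

static int my_subsys_init(struct drm_i915_private *i915)
{
	int err;

	/* Checkpoint: returns -ENODEV when this is the injected failure. */
	if (i915_inject_probe_failure(i915))
		return -ENODEV;

	err = my_hw_setup(i915);	/* assumed helper */
	if (err)
		/* KERN_DEBUG if the failure was injected, KERN_ERR if real */
		i915_probe_error(i915, "hw setup failed: %d\n", err);

	return err;
}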
diff --git a/drivers/gpu/drm/i915/i915_vgpu.c b/drivers/gpu/drm/i915/i915_vgpu.c
index 724627afdedc..968be26735c5 100644
--- a/drivers/gpu/drm/i915/i915_vgpu.c
+++ b/drivers/gpu/drm/i915/i915_vgpu.c
@@ -21,7 +21,6 @@
* SOFTWARE.
*/
-#include "intel_drv.h"
#include "i915_vgpu.h"
/**
@@ -52,34 +51,54 @@
*/
/**
- * i915_check_vgpu - detect virtual GPU
+ * i915_detect_vgpu - detect virtual GPU
* @dev_priv: i915 device private
*
* This function is called at the initialization stage, to detect whether
* we are running on a vGPU.
*/
-void i915_check_vgpu(struct drm_i915_private *dev_priv)
+void i915_detect_vgpu(struct drm_i915_private *dev_priv)
{
- struct intel_uncore *uncore = &dev_priv->uncore;
+ struct pci_dev *pdev = dev_priv->drm.pdev;
u64 magic;
u16 version_major;
+ void __iomem *shared_area;
BUILD_BUG_ON(sizeof(struct vgt_if) != VGT_PVINFO_SIZE);
- magic = __raw_uncore_read64(uncore, vgtif_reg(magic));
- if (magic != VGT_MAGIC)
+ /*
+ * This is called before we set up the main MMIO BAR mappings used via
+ * the uncore structure, so we need to access the BAR directly. Since
+ * we do not support VGT on older gens, return early so we don't have
+ * to consider differently numbered or sized MMIO BARs.
+ */
+ if (INTEL_GEN(dev_priv) < 6)
+ return;
+
+ shared_area = pci_iomap_range(pdev, 0, VGT_PVINFO_PAGE, VGT_PVINFO_SIZE);
+ if (!shared_area) {
+ DRM_ERROR("failed to map MMIO bar to check for VGT\n");
return;
+ }
+
+ magic = readq(shared_area + vgtif_offset(magic));
+ if (magic != VGT_MAGIC)
+ goto out;
- version_major = __raw_uncore_read16(uncore, vgtif_reg(version_major));
+ version_major = readw(shared_area + vgtif_offset(version_major));
if (version_major < VGT_VERSION_MAJOR) {
DRM_INFO("VGT interface version mismatch!\n");
- return;
+ goto out;
}
- dev_priv->vgpu.caps = __raw_uncore_read32(uncore, vgtif_reg(vgt_caps));
+ dev_priv->vgpu.caps = readl(shared_area + vgtif_offset(vgt_caps));
dev_priv->vgpu.active = true;
+ mutex_init(&dev_priv->vgpu.lock);
DRM_INFO("Virtual GPU for Intel GVT-g detected.\n");
+
+out:
+ pci_iounmap(pdev, shared_area);
}
bool intel_vgpu_has_full_ppgtt(struct drm_i915_private *dev_priv)
@@ -115,22 +134,22 @@ static void vgt_deballoon_space(struct i915_ggtt *ggtt,
/**
* intel_vgt_deballoon - deballoon reserved graphics address trunks
- * @dev_priv: i915 device private data
+ * @ggtt: the global GGTT from which we reserved earlier
*
* This function is called to deallocate the ballooned-out graphic memory, when
* driver is unloaded or when ballooning fails.
*/
-void intel_vgt_deballoon(struct drm_i915_private *dev_priv)
+void intel_vgt_deballoon(struct i915_ggtt *ggtt)
{
int i;
- if (!intel_vgpu_active(dev_priv))
+ if (!intel_vgpu_active(ggtt->vm.i915))
return;
DRM_DEBUG("VGT deballoon.\n");
for (i = 0; i < 4; i++)
- vgt_deballoon_space(&dev_priv->ggtt, &bl_info.space[i]);
+ vgt_deballoon_space(ggtt, &bl_info.space[i]);
}
static int vgt_balloon_space(struct i915_ggtt *ggtt,
@@ -156,7 +175,7 @@ static int vgt_balloon_space(struct i915_ggtt *ggtt,
/**
* intel_vgt_balloon - balloon out reserved graphics address trunks
- * @dev_priv: i915 device private data
+ * @ggtt: the global GGTT from which to reserve
*
* This function is called at the initialization stage, to balloon out the
* graphic address space allocated to other vGPUs, by marking these spaces as
@@ -198,22 +217,26 @@ static int vgt_balloon_space(struct i915_ggtt *ggtt,
* Returns:
* zero on success, non-zero if configuration invalid or ballooning failed
*/
-int intel_vgt_balloon(struct drm_i915_private *dev_priv)
+int intel_vgt_balloon(struct i915_ggtt *ggtt)
{
- struct i915_ggtt *ggtt = &dev_priv->ggtt;
+ struct intel_uncore *uncore = &ggtt->vm.i915->uncore;
unsigned long ggtt_end = ggtt->vm.total;
unsigned long mappable_base, mappable_size, mappable_end;
unsigned long unmappable_base, unmappable_size, unmappable_end;
int ret;
- if (!intel_vgpu_active(dev_priv))
+ if (!intel_vgpu_active(ggtt->vm.i915))
return 0;
- mappable_base = I915_READ(vgtif_reg(avail_rs.mappable_gmadr.base));
- mappable_size = I915_READ(vgtif_reg(avail_rs.mappable_gmadr.size));
- unmappable_base = I915_READ(vgtif_reg(avail_rs.nonmappable_gmadr.base));
- unmappable_size = I915_READ(vgtif_reg(avail_rs.nonmappable_gmadr.size));
+ mappable_base =
+ intel_uncore_read(uncore, vgtif_reg(avail_rs.mappable_gmadr.base));
+ mappable_size =
+ intel_uncore_read(uncore, vgtif_reg(avail_rs.mappable_gmadr.size));
+ unmappable_base =
+ intel_uncore_read(uncore, vgtif_reg(avail_rs.nonmappable_gmadr.base));
+ unmappable_size =
+ intel_uncore_read(uncore, vgtif_reg(avail_rs.nonmappable_gmadr.size));
mappable_end = mappable_base + mappable_size;
unmappable_end = unmappable_base + unmappable_size;
diff --git a/drivers/gpu/drm/i915/i915_vgpu.h b/drivers/gpu/drm/i915/i915_vgpu.h
index ebe1b7bced98..8b3663dad193 100644
--- a/drivers/gpu/drm/i915/i915_vgpu.h
+++ b/drivers/gpu/drm/i915/i915_vgpu.h
@@ -24,9 +24,10 @@
#ifndef _I915_VGPU_H_
#define _I915_VGPU_H_
+#include "i915_drv.h"
#include "i915_pvinfo.h"
-void i915_check_vgpu(struct drm_i915_private *dev_priv);
+void i915_detect_vgpu(struct drm_i915_private *dev_priv);
bool intel_vgpu_has_full_ppgtt(struct drm_i915_private *dev_priv);
@@ -42,7 +43,7 @@ intel_vgpu_has_huge_gtt(struct drm_i915_private *dev_priv)
return dev_priv->vgpu.caps & VGT_CAPS_HUGE_GTT;
}
-int intel_vgt_balloon(struct drm_i915_private *dev_priv);
-void intel_vgt_deballoon(struct drm_i915_private *dev_priv);
+int intel_vgt_balloon(struct i915_ggtt *ggtt);
+void intel_vgt_deballoon(struct i915_ggtt *ggtt);
#endif /* _I915_VGPU_H_ */
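[Editor's note: the detection rework above boils down to mapping only the PVINFO page of BAR0 before any uncore mappings exist. A condensed sketch of that pattern; probe_vgt_magic is an illustrative wrapper around the same pci_iomap_range()/readq() calls and PVINFO macros the patch itself uses.]

static bool probe_vgt_magic(struct pci_dev *pdev)
{
	void __iomem *area;
	u64 magic;

	/* Map just the shared PVINFO page from BAR0, pre-uncore. */
	area = pci_iomap_range(pdev, 0, VGT_PVINFO_PAGE, VGT_PVINFO_SIZE);
	if (!area)
		return false;

	magic = readq(area + vgtif_offset(magic));
	pci_iounmap(pdev, area);

	return magic == VGT_MAGIC;
}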
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index a57729be8312..e0e677b2a3a9 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -22,14 +22,17 @@
*
*/
+#include <linux/sched/mm.h>
#include <drm/drm_gem.h>
#include "display/intel_frontbuffer.h"
#include "gt/intel_engine.h"
+#include "gt/intel_gt.h"
#include "i915_drv.h"
#include "i915_globals.h"
+#include "i915_trace.h"
#include "i915_vma.h"
static struct i915_global_vma {
@@ -77,43 +80,19 @@ static void vma_print_allocator(struct i915_vma *vma, const char *reason)
#endif
-static void obj_bump_mru(struct drm_i915_gem_object *obj)
+static inline struct i915_vma *active_to_vma(struct i915_active *ref)
{
- struct drm_i915_private *i915 = to_i915(obj->base.dev);
- unsigned long flags;
-
- spin_lock_irqsave(&i915->mm.obj_lock, flags);
- list_move_tail(&obj->mm.link, &i915->mm.shrink_list);
- spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
+ return container_of(ref, typeof(struct i915_vma), active);
+}
- obj->mm.dirty = true; /* be paranoid */
+static int __i915_vma_active(struct i915_active *ref)
+{
+ return i915_vma_tryget(active_to_vma(ref)) ? 0 : -ENOENT;
}
static void __i915_vma_retire(struct i915_active *ref)
{
- struct i915_vma *vma = container_of(ref, typeof(*vma), active);
- struct drm_i915_gem_object *obj = vma->obj;
-
- GEM_BUG_ON(!i915_gem_object_is_active(obj));
- if (--obj->active_count)
- return;
-
- /* Prune the shared fence arrays iff completely idle (inc. external) */
- if (reservation_object_trylock(obj->base.resv)) {
- if (reservation_object_test_signaled_rcu(obj->base.resv, true))
- reservation_object_add_excl_fence(obj->base.resv, NULL);
- reservation_object_unlock(obj->base.resv);
- }
-
- /*
- * Bump our place on the bound list to keep it roughly in LRU order
- * so that we don't steal from recently used but inactive objects
- * (unless we are forced to ofc!)
- */
- if (i915_gem_object_is_shrinkable(obj))
- obj_bump_mru(obj);
-
- i915_gem_object_put(obj); /* and drop the active reference */
+ i915_vma_put(active_to_vma(ref));
}
static struct i915_vma *
@@ -125,7 +104,7 @@ vma_create(struct drm_i915_gem_object *obj,
struct rb_node *rb, **p;
/* The aliasing_ppgtt should never be used directly! */
- GEM_BUG_ON(vm == &vm->i915->mm.aliasing_ppgtt->vm);
+ GEM_BUG_ON(vm == &vm->i915->ggtt.alias->vm);
vma = i915_vma_alloc();
if (vma == NULL)
@@ -138,8 +117,15 @@ vma_create(struct drm_i915_gem_object *obj,
vma->size = obj->base.size;
vma->display_alignment = I915_GTT_MIN_ALIGNMENT;
- i915_active_init(vm->i915, &vma->active, __i915_vma_retire);
- INIT_ACTIVE_REQUEST(&vma->last_fence);
+ i915_active_init(vm->i915, &vma->active,
+ __i915_vma_active, __i915_vma_retire);
+
+ /* Declare ourselves safe for use inside shrinkers */
+ if (IS_ENABLED(CONFIG_LOCKDEP)) {
+ fs_reclaim_acquire(GFP_KERNEL);
+ might_lock(&vma->active.mutex);
+ fs_reclaim_release(GFP_KERNEL);
+ }
INIT_LIST_HEAD(&vma->closed_link);
@@ -408,7 +394,7 @@ void i915_vma_flush_writes(struct i915_vma *vma)
if (!i915_vma_has_ggtt_write(vma))
return;
- i915_gem_flush_ggtt_writes(vma->vm->i915);
+ intel_gt_flush_ggtt_writes(vma->vm->gt);
i915_vma_unset_ggtt_write(vma);
}
@@ -814,8 +800,6 @@ static void __i915_vma_destroy(struct i915_vma *vma)
GEM_BUG_ON(vma->node.allocated);
GEM_BUG_ON(vma->fence);
- GEM_BUG_ON(i915_active_request_isset(&vma->last_fence));
-
mutex_lock(&vma->vm->mutex);
list_del(&vma->vm_link);
mutex_unlock(&vma->vm->mutex);
@@ -880,7 +864,7 @@ void i915_vma_revoke_mmap(struct i915_vma *vma)
struct drm_vma_offset_node *node = &vma->obj->base.vma_node;
u64 vma_offset;
- lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
+ lockdep_assert_held(&vma->vm->mutex);
if (!i915_vma_has_userfault(vma))
return;
@@ -899,28 +883,12 @@ void i915_vma_revoke_mmap(struct i915_vma *vma)
list_del(&vma->obj->userfault_link);
}
-static void export_fence(struct i915_vma *vma,
- struct i915_request *rq,
- unsigned int flags)
-{
- struct reservation_object *resv = vma->resv;
-
- /*
- * Ignore errors from failing to allocate the new fence, we can't
- * handle an error right now. Worst case should be missed
- * synchronisation leading to rendering corruption.
- */
- if (flags & EXEC_OBJECT_WRITE)
- reservation_object_add_excl_fence(resv, &rq->fence);
- else if (reservation_object_reserve_shared(resv, 1) == 0)
- reservation_object_add_shared_fence(resv, &rq->fence);
-}
-
int i915_vma_move_to_active(struct i915_vma *vma,
struct i915_request *rq,
unsigned int flags)
{
struct drm_i915_gem_object *obj = vma->obj;
+ int err;
assert_vma_held(vma);
assert_object_held(obj);
@@ -934,33 +902,31 @@ int i915_vma_move_to_active(struct i915_vma *vma,
* add the active reference first and queue for it to be dropped
* *last*.
*/
- if (!vma->active.count && !obj->active_count++)
- i915_gem_object_get(obj); /* once more for the active ref */
+ err = i915_active_ref(&vma->active, rq->timeline, rq);
+ if (unlikely(err))
+ return err;
- if (unlikely(i915_active_ref(&vma->active, rq->fence.context, rq))) {
- if (!vma->active.count && !--obj->active_count)
- i915_gem_object_put(obj);
- return -ENOMEM;
- }
-
- GEM_BUG_ON(!i915_vma_is_active(vma));
- GEM_BUG_ON(!obj->active_count);
-
- obj->write_domain = 0;
if (flags & EXEC_OBJECT_WRITE) {
- obj->write_domain = I915_GEM_DOMAIN_RENDER;
-
- if (intel_fb_obj_invalidate(obj, ORIGIN_CS))
- __i915_active_request_set(&obj->frontbuffer_write, rq);
+ if (intel_frontbuffer_invalidate(obj->frontbuffer, ORIGIN_CS))
+ i915_active_ref(&obj->frontbuffer->write,
+ rq->timeline,
+ rq);
+ dma_resv_add_excl_fence(vma->resv, &rq->fence);
+ obj->write_domain = I915_GEM_DOMAIN_RENDER;
obj->read_domains = 0;
+ } else {
+ err = dma_resv_reserve_shared(vma->resv, 1);
+ if (unlikely(err))
+ return err;
+
+ dma_resv_add_shared_fence(vma->resv, &rq->fence);
+ obj->write_domain = 0;
}
obj->read_domains |= I915_GEM_GPU_DOMAINS;
+ obj->mm.dirty = true;
- if (flags & EXEC_OBJECT_NEEDS_FENCE)
- __i915_active_request_set(&vma->last_fence, rq);
-
- export_fence(vma, rq, flags);
+ GEM_BUG_ON(!i915_vma_is_active(vma));
return 0;
}
@@ -990,14 +956,7 @@ int i915_vma_unbind(struct i915_vma *vma)
* before we are finished).
*/
__i915_vma_pin(vma);
-
ret = i915_active_wait(&vma->active);
- if (ret)
- goto unpin;
-
- ret = i915_active_request_retire(&vma->last_fence,
- &vma->vm->i915->drm.struct_mutex);
-unpin:
__i915_vma_unpin(vma);
if (ret)
return ret;
@@ -1023,12 +982,16 @@ unpin:
GEM_BUG_ON(i915_vma_has_ggtt_write(vma));
/* release the fence reg _after_ flushing */
- ret = i915_vma_put_fence(vma);
+ mutex_lock(&vma->vm->mutex);
+ ret = i915_vma_revoke_fence(vma);
+ mutex_unlock(&vma->vm->mutex);
if (ret)
return ret;
/* Force a pagefault for domain tracking on next user access */
+ mutex_lock(&vma->vm->mutex);
i915_vma_revoke_mmap(vma);
+ mutex_unlock(&vma->vm->mutex);
__i915_vma_iounmap(vma);
vma->flags &= ~I915_VMA_CAN_FENCE;
@@ -1047,6 +1010,22 @@ unpin:
return 0;
}
+struct i915_vma *i915_vma_make_unshrinkable(struct i915_vma *vma)
+{
+ i915_gem_object_make_unshrinkable(vma->obj);
+ return vma;
+}
+
+void i915_vma_make_shrinkable(struct i915_vma *vma)
+{
+ i915_gem_object_make_shrinkable(vma->obj);
+}
+
+void i915_vma_make_purgeable(struct i915_vma *vma)
+{
+ i915_gem_object_make_purgeable(vma->obj);
+}
+
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/i915_vma.c"
#endif
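[Editor's note: the move_to_active rewrite above open-codes the reservation-object bookkeeping that export_fence() used to hide. The core of it reduces to the following sketch (track_request is an illustrative name), which assumes the dma_resv lock is already held, as assert_vma_held() checks. Note the behavioural change: the old export_fence() deliberately ignored reservation failures, whereas the new code propagates them to the caller.]

static int track_request(struct dma_resv *resv, struct dma_fence *fence,
			 bool write)
{
	int err;

	if (write) {
		/* Writers install the exclusive fence. */
		dma_resv_add_excl_fence(resv, fence);
		return 0;
	}

	/* Readers must reserve a slot; unlike before, failure is fatal. */
	err = dma_resv_reserve_shared(resv, 1);
	if (err)
		return err;

	dma_resv_add_shared_fence(resv, fence);
	return 0;
}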
diff --git a/drivers/gpu/drm/i915/i915_vma.h b/drivers/gpu/drm/i915/i915_vma.h
index 4b769db649bf..889fc7cb910a 100644
--- a/drivers/gpu/drm/i915/i915_vma.h
+++ b/drivers/gpu/drm/i915/i915_vma.h
@@ -55,7 +55,7 @@ struct i915_vma {
struct i915_address_space *vm;
const struct i915_vma_ops *ops;
struct i915_fence_reg *fence;
- struct reservation_object *resv; /** Alias of obj->resv */
+ struct dma_resv *resv; /** Alias of obj->resv */
struct sg_table *pages;
void __iomem *iomap;
void *private; /* owned by creator */
@@ -111,7 +111,6 @@ struct i915_vma {
#define I915_VMA_GGTT_WRITE BIT(14)
struct i915_active active;
- struct i915_active_request last_fence;
/**
* Support different GGTT views into the same object.
@@ -232,6 +231,14 @@ static inline struct i915_vma *i915_vma_get(struct i915_vma *vma)
return vma;
}
+static inline struct i915_vma *i915_vma_tryget(struct i915_vma *vma)
+{
+ if (likely(kref_get_unless_zero(&vma->obj->base.refcount)))
+ return vma;
+
+ return NULL;
+}
+
static inline void i915_vma_put(struct i915_vma *vma)
{
i915_gem_object_put(vma->obj);
@@ -299,16 +306,16 @@ void i915_vma_close(struct i915_vma *vma);
void i915_vma_reopen(struct i915_vma *vma);
void i915_vma_destroy(struct i915_vma *vma);
-#define assert_vma_held(vma) reservation_object_assert_held((vma)->resv)
+#define assert_vma_held(vma) dma_resv_assert_held((vma)->resv)
static inline void i915_vma_lock(struct i915_vma *vma)
{
- reservation_object_lock(vma->resv, NULL);
+ dma_resv_lock(vma->resv, NULL);
}
static inline void i915_vma_unlock(struct i915_vma *vma)
{
- reservation_object_unlock(vma->resv);
+ dma_resv_unlock(vma->resv);
}
int __i915_vma_do_pin(struct i915_vma *vma,
@@ -414,13 +421,13 @@ static inline struct page *i915_vma_first_page(struct i915_vma *vma)
*
* True if the vma has a fence, false otherwise.
*/
-int i915_vma_pin_fence(struct i915_vma *vma);
-int __must_check i915_vma_put_fence(struct i915_vma *vma);
+int __must_check i915_vma_pin_fence(struct i915_vma *vma);
+int __must_check i915_vma_revoke_fence(struct i915_vma *vma);
static inline void __i915_vma_unpin_fence(struct i915_vma *vma)
{
- GEM_BUG_ON(vma->fence->pin_count <= 0);
- vma->fence->pin_count--;
+ GEM_BUG_ON(atomic_read(&vma->fence->pin_count) <= 0);
+ atomic_dec(&vma->fence->pin_count);
}
/**
@@ -459,4 +466,8 @@ void i915_vma_parked(struct drm_i915_private *i915);
struct i915_vma *i915_vma_alloc(void);
void i915_vma_free(struct i915_vma *vma);
+struct i915_vma *i915_vma_make_unshrinkable(struct i915_vma *vma);
+void i915_vma_make_shrinkable(struct i915_vma *vma);
+void i915_vma_make_purgeable(struct i915_vma *vma);
+
#endif
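[Editor's note: i915_vma_tryget() exists so the new __i915_vma_active() callback can latch the backing object from a context where the last reference may be dropping concurrently. A hedged usage sketch; use_vma_if_alive is an illustrative name.]

static int use_vma_if_alive(struct i915_vma *candidate)
{
	struct i915_vma *vma;

	/* kref_get_unless_zero() on the backing object */
	vma = i915_vma_tryget(candidate);
	if (!vma)
		return -ENOENT;	/* object already being torn down */

	/* ... safe to use vma here ... */

	i915_vma_put(vma);
	return 0;
}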
diff --git a/drivers/gpu/drm/i915/intel_csr.c b/drivers/gpu/drm/i915/intel_csr.c
index 6ef74531588a..546577e39b4e 100644
--- a/drivers/gpu/drm/i915/intel_csr.c
+++ b/drivers/gpu/drm/i915/intel_csr.c
@@ -39,6 +39,11 @@
#define GEN12_CSR_MAX_FW_SIZE ICL_CSR_MAX_FW_SIZE
+#define TGL_CSR_PATH "i915/tgl_dmc_ver2_04.bin"
+#define TGL_CSR_VERSION_REQUIRED CSR_VERSION(2, 4)
+#define TGL_CSR_MAX_FW_SIZE 0x6000
+MODULE_FIRMWARE(TGL_CSR_PATH);
+
#define ICL_CSR_PATH "i915/icl_dmc_ver1_07.bin"
#define ICL_CSR_VERSION_REQUIRED CSR_VERSION(1, 7)
#define ICL_CSR_MAX_FW_SIZE 0x6000
@@ -674,6 +679,8 @@ void intel_csr_ucode_init(struct drm_i915_private *dev_priv)
intel_csr_runtime_pm_get(dev_priv);
if (INTEL_GEN(dev_priv) >= 12) {
+ csr->fw_path = TGL_CSR_PATH;
+ csr->required_version = TGL_CSR_VERSION_REQUIRED;
/* Allow loading fw via parameter using the last known size */
csr->max_fw_size = GEN12_CSR_MAX_FW_SIZE;
} else if (IS_GEN(dev_priv, 11)) {
diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c
index 7135d8dc32a7..d0ed44d33484 100644
--- a/drivers/gpu/drm/i915/intel_device_info.c
+++ b/drivers/gpu/drm/i915/intel_device_info.c
@@ -58,6 +58,7 @@ static const char * const platform_names[] = {
PLATFORM_NAME(CANNONLAKE),
PLATFORM_NAME(ICELAKE),
PLATFORM_NAME(ELKHARTLAKE),
+ PLATFORM_NAME(TIGERLAKE),
};
#undef PLATFORM_NAME
@@ -715,7 +716,7 @@ static u32 read_timestamp_frequency(struct drm_i915_private *dev_priv)
}
return freq;
- } else if (INTEL_GEN(dev_priv) <= 11) {
+ } else if (INTEL_GEN(dev_priv) <= 12) {
u32 ctc_reg = I915_READ(CTC_MODE);
u32 freq = 0;
@@ -929,35 +930,28 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
}
} else if (HAS_DISPLAY(dev_priv) && INTEL_GEN(dev_priv) >= 9) {
u32 dfsm = I915_READ(SKL_DFSM);
- u8 disabled_mask = 0;
- bool invalid;
- int num_bits;
+ u8 enabled_mask = BIT(info->num_pipes) - 1;
if (dfsm & SKL_DFSM_PIPE_A_DISABLE)
- disabled_mask |= BIT(PIPE_A);
+ enabled_mask &= ~BIT(PIPE_A);
if (dfsm & SKL_DFSM_PIPE_B_DISABLE)
- disabled_mask |= BIT(PIPE_B);
+ enabled_mask &= ~BIT(PIPE_B);
if (dfsm & SKL_DFSM_PIPE_C_DISABLE)
- disabled_mask |= BIT(PIPE_C);
-
- num_bits = hweight8(disabled_mask);
-
- switch (disabled_mask) {
- case BIT(PIPE_A):
- case BIT(PIPE_B):
- case BIT(PIPE_A) | BIT(PIPE_B):
- case BIT(PIPE_A) | BIT(PIPE_C):
- invalid = true;
- break;
- default:
- invalid = false;
- }
+ enabled_mask &= ~BIT(PIPE_C);
+ if (INTEL_GEN(dev_priv) >= 12 &&
+ (dfsm & TGL_DFSM_PIPE_D_DISABLE))
+ enabled_mask &= ~BIT(PIPE_D);
- if (num_bits > info->num_pipes || invalid)
- DRM_ERROR("invalid pipe fuse configuration: 0x%x\n",
- disabled_mask);
+ /*
+ * At least one pipe should be enabled and if there are
+ * disabled pipes, they should be the last ones, with no holes
+ * in the mask.
+ */
+ if (enabled_mask == 0 || !is_power_of_2(enabled_mask + 1))
+ DRM_ERROR("invalid pipe fuse configuration: enabled_mask=0x%x\n",
+ enabled_mask);
else
- info->num_pipes -= num_bits;
+ info->num_pipes = hweight8(enabled_mask);
}
/* Initialize slice/subslice/EU info */
@@ -1028,8 +1022,9 @@ void intel_device_info_init_mmio(struct drm_i915_private *dev_priv)
/*
* In Gen11, only even numbered logical VDBOXes are
* hooked up to an SFC (Scaler & Format Converter) unit.
+ * In TGL each VDBOX has access to an SFC.
*/
- if (logical_vdbox++ % 2 == 0)
+ if (IS_TIGERLAKE(dev_priv) || logical_vdbox++ % 2 == 0)
RUNTIME_INFO(dev_priv)->vdbox_sfc_access |= BIT(i);
}
DRM_DEBUG_DRIVER("vdbox enable: %04x, instances: %04lx\n",
diff --git a/drivers/gpu/drm/i915/intel_device_info.h b/drivers/gpu/drm/i915/intel_device_info.h
index ddafc819bf30..92e0c2e0954c 100644
--- a/drivers/gpu/drm/i915/intel_device_info.h
+++ b/drivers/gpu/drm/i915/intel_device_info.h
@@ -78,6 +78,8 @@ enum intel_platform {
/* gen11 */
INTEL_ICELAKE,
INTEL_ELKHARTLAKE,
+ /* gen12 */
+ INTEL_TIGERLAKE,
INTEL_MAX_PLATFORMS
};
@@ -110,7 +112,8 @@ enum intel_ppgtt_type {
func(gpu_reset_clobbers_display); \
func(has_reset_engine); \
func(has_fpga_dbg); \
- func(has_guc); \
+ func(has_global_mocs); \
+ func(has_gt_uc); \
func(has_l3_dpf); \
func(has_llc); \
func(has_logical_ring_contexts); \
@@ -136,6 +139,7 @@ enum intel_ppgtt_type {
func(has_gmch); \
func(has_hotplug); \
func(has_ipc); \
+ func(has_modular_fia); \
func(has_overlay); \
func(has_psr); \
func(overlay_needs_physical); \
diff --git a/drivers/gpu/drm/i915/intel_guc_ads.h b/drivers/gpu/drm/i915/intel_guc_ads.h
deleted file mode 100644
index 7f40f9cd5fb9..000000000000
--- a/drivers/gpu/drm/i915/intel_guc_ads.h
+++ /dev/null
@@ -1,34 +0,0 @@
-/*
- * Copyright © 2014-2017 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
- */
-
-#ifndef _INTEL_GUC_ADS_H_
-#define _INTEL_GUC_ADS_H_
-
-struct intel_guc;
-
-int intel_guc_ads_create(struct intel_guc *guc);
-void intel_guc_ads_destroy(struct intel_guc *guc);
-void intel_guc_ads_reset(struct intel_guc *guc);
-
-#endif
diff --git a/drivers/gpu/drm/i915/intel_guc_fw.c b/drivers/gpu/drm/i915/intel_guc_fw.c
deleted file mode 100644
index 72cdafd9636a..000000000000
--- a/drivers/gpu/drm/i915/intel_guc_fw.c
+++ /dev/null
@@ -1,308 +0,0 @@
-/*
- * Copyright © 2014 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
- * Authors:
- * Vinit Azad <vinit.azad@intel.com>
- * Ben Widawsky <ben@bwidawsk.net>
- * Dave Gordon <david.s.gordon@intel.com>
- * Alex Dai <yu.dai@intel.com>
- */
-
-#include "intel_guc_fw.h"
-#include "i915_drv.h"
-
-#define __MAKE_GUC_FW_PATH(KEY) \
- "i915/" \
- __stringify(KEY##_GUC_FW_PREFIX) "_guc_" \
- __stringify(KEY##_GUC_FW_MAJOR) "." \
- __stringify(KEY##_GUC_FW_MINOR) "." \
- __stringify(KEY##_GUC_FW_PATCH) ".bin"
-
-#define SKL_GUC_FW_PREFIX skl
-#define SKL_GUC_FW_MAJOR 32
-#define SKL_GUC_FW_MINOR 0
-#define SKL_GUC_FW_PATCH 3
-#define SKL_GUC_FIRMWARE_PATH __MAKE_GUC_FW_PATH(SKL)
-MODULE_FIRMWARE(SKL_GUC_FIRMWARE_PATH);
-
-#define BXT_GUC_FW_PREFIX bxt
-#define BXT_GUC_FW_MAJOR 32
-#define BXT_GUC_FW_MINOR 0
-#define BXT_GUC_FW_PATCH 3
-#define BXT_GUC_FIRMWARE_PATH __MAKE_GUC_FW_PATH(BXT)
-MODULE_FIRMWARE(BXT_GUC_FIRMWARE_PATH);
-
-#define KBL_GUC_FW_PREFIX kbl
-#define KBL_GUC_FW_MAJOR 32
-#define KBL_GUC_FW_MINOR 0
-#define KBL_GUC_FW_PATCH 3
-#define KBL_GUC_FIRMWARE_PATH __MAKE_GUC_FW_PATH(KBL)
-MODULE_FIRMWARE(KBL_GUC_FIRMWARE_PATH);
-
-#define GLK_GUC_FW_PREFIX glk
-#define GLK_GUC_FW_MAJOR 32
-#define GLK_GUC_FW_MINOR 0
-#define GLK_GUC_FW_PATCH 3
-#define GLK_GUC_FIRMWARE_PATH __MAKE_GUC_FW_PATH(GLK)
-MODULE_FIRMWARE(GLK_GUC_FIRMWARE_PATH);
-
-#define ICL_GUC_FW_PREFIX icl
-#define ICL_GUC_FW_MAJOR 32
-#define ICL_GUC_FW_MINOR 0
-#define ICL_GUC_FW_PATCH 3
-#define ICL_GUC_FIRMWARE_PATH __MAKE_GUC_FW_PATH(ICL)
-MODULE_FIRMWARE(ICL_GUC_FIRMWARE_PATH);
-
-static void guc_fw_select(struct intel_uc_fw *guc_fw)
-{
- struct intel_guc *guc = container_of(guc_fw, struct intel_guc, fw);
- struct drm_i915_private *i915 = guc_to_i915(guc);
-
- GEM_BUG_ON(guc_fw->type != INTEL_UC_FW_TYPE_GUC);
-
- if (!HAS_GUC(i915))
- return;
-
- if (i915_modparams.guc_firmware_path) {
- guc_fw->path = i915_modparams.guc_firmware_path;
- guc_fw->major_ver_wanted = 0;
- guc_fw->minor_ver_wanted = 0;
- } else if (IS_ICELAKE(i915)) {
- guc_fw->path = ICL_GUC_FIRMWARE_PATH;
- guc_fw->major_ver_wanted = ICL_GUC_FW_MAJOR;
- guc_fw->minor_ver_wanted = ICL_GUC_FW_MINOR;
- } else if (IS_GEMINILAKE(i915)) {
- guc_fw->path = GLK_GUC_FIRMWARE_PATH;
- guc_fw->major_ver_wanted = GLK_GUC_FW_MAJOR;
- guc_fw->minor_ver_wanted = GLK_GUC_FW_MINOR;
- } else if (IS_KABYLAKE(i915) || IS_COFFEELAKE(i915)) {
- guc_fw->path = KBL_GUC_FIRMWARE_PATH;
- guc_fw->major_ver_wanted = KBL_GUC_FW_MAJOR;
- guc_fw->minor_ver_wanted = KBL_GUC_FW_MINOR;
- } else if (IS_BROXTON(i915)) {
- guc_fw->path = BXT_GUC_FIRMWARE_PATH;
- guc_fw->major_ver_wanted = BXT_GUC_FW_MAJOR;
- guc_fw->minor_ver_wanted = BXT_GUC_FW_MINOR;
- } else if (IS_SKYLAKE(i915)) {
- guc_fw->path = SKL_GUC_FIRMWARE_PATH;
- guc_fw->major_ver_wanted = SKL_GUC_FW_MAJOR;
- guc_fw->minor_ver_wanted = SKL_GUC_FW_MINOR;
- }
-}
-
-/**
- * intel_guc_fw_init_early() - initializes GuC firmware struct
- * @guc: intel_guc struct
- *
- * On platforms with GuC selects firmware for uploading
- */
-void intel_guc_fw_init_early(struct intel_guc *guc)
-{
- struct intel_uc_fw *guc_fw = &guc->fw;
-
- intel_uc_fw_init_early(guc_fw, INTEL_UC_FW_TYPE_GUC);
- guc_fw_select(guc_fw);
-}
-
-static void guc_prepare_xfer(struct intel_guc *guc)
-{
- struct drm_i915_private *dev_priv = guc_to_i915(guc);
-
- /* Must program this register before loading the ucode with DMA */
- I915_WRITE(GUC_SHIM_CONTROL, GUC_DISABLE_SRAM_INIT_TO_ZEROES |
- GUC_ENABLE_READ_CACHE_LOGIC |
- GUC_ENABLE_MIA_CACHING |
- GUC_ENABLE_READ_CACHE_FOR_SRAM_DATA |
- GUC_ENABLE_READ_CACHE_FOR_WOPCM_DATA |
- GUC_ENABLE_MIA_CLOCK_GATING);
-
- if (IS_GEN9_LP(dev_priv))
- I915_WRITE(GEN9LP_GT_PM_CONFIG, GT_DOORBELL_ENABLE);
- else
- I915_WRITE(GEN9_GT_PM_CONFIG, GT_DOORBELL_ENABLE);
-
- if (IS_GEN(dev_priv, 9)) {
- /* DOP Clock Gating Enable for GuC clocks */
- I915_WRITE(GEN7_MISCCPCTL, (GEN8_DOP_CLOCK_GATE_GUC_ENABLE |
- I915_READ(GEN7_MISCCPCTL)));
-
- /* allows for 5us (in 10ns units) before GT can go to RC6 */
- I915_WRITE(GUC_ARAT_C6DIS, 0x1FF);
- }
-}
-
-/* Copy RSA signature from the fw image to HW for verification */
-static void guc_xfer_rsa(struct intel_guc *guc)
-{
- struct drm_i915_private *dev_priv = guc_to_i915(guc);
- struct intel_uc_fw *fw = &guc->fw;
- struct sg_table *pages = fw->obj->mm.pages;
- u32 rsa[UOS_RSA_SCRATCH_COUNT];
- int i;
-
- sg_pcopy_to_buffer(pages->sgl, pages->nents,
- rsa, sizeof(rsa), fw->rsa_offset);
-
- for (i = 0; i < UOS_RSA_SCRATCH_COUNT; i++)
- I915_WRITE(UOS_RSA_SCRATCH(i), rsa[i]);
-}
-
-static bool guc_xfer_completed(struct intel_guc *guc, u32 *status)
-{
- struct drm_i915_private *dev_priv = guc_to_i915(guc);
-
- /* Did we complete the xfer? */
- *status = I915_READ(DMA_CTRL);
- return !(*status & START_DMA);
-}
-
-/*
- * Read the GuC status register (GUC_STATUS) and store it in the
- * specified location; then return a boolean indicating whether
- * the value matches either of two values representing completion
- * of the GuC boot process.
- *
- * This is used for polling the GuC status in a wait_for()
- * loop below.
- */
-static inline bool guc_ready(struct intel_guc *guc, u32 *status)
-{
- struct drm_i915_private *dev_priv = guc_to_i915(guc);
- u32 val = I915_READ(GUC_STATUS);
- u32 uk_val = val & GS_UKERNEL_MASK;
-
- *status = val;
- return (uk_val == GS_UKERNEL_READY) ||
- ((val & GS_MIA_CORE_STATE) && (uk_val == GS_UKERNEL_LAPIC_DONE));
-}
-
-static int guc_wait_ucode(struct intel_guc *guc)
-{
- u32 status;
- int ret;
-
- /*
- * Wait for the GuC to start up.
- * NB: Docs recommend not using the interrupt for completion.
- * Measurements indicate this should take no more than 20ms, so a
- * timeout here indicates that the GuC has failed and is unusable.
- * (Higher levels of the driver may decide to reset the GuC and
- * attempt the ucode load again if this happens.)
- */
- ret = wait_for(guc_ready(guc, &status), 100);
- DRM_DEBUG_DRIVER("GuC status %#x\n", status);
-
- if ((status & GS_BOOTROM_MASK) == GS_BOOTROM_RSA_FAILED) {
- DRM_ERROR("GuC firmware signature verification failed\n");
- ret = -ENOEXEC;
- }
-
- if (ret == 0 && !guc_xfer_completed(guc, &status)) {
- DRM_ERROR("GuC is ready, but the xfer %08x is incomplete\n",
- status);
- ret = -ENXIO;
- }
-
- return ret;
-}
-
-/*
- * Transfer the firmware image to RAM for execution by the microcontroller.
- *
- * Architecturally, the DMA engine is bidirectional, and can potentially even
- * transfer between GTT locations. This functionality is left out of the API
- * for now as there is no need for it.
- */
-static int guc_xfer_ucode(struct intel_guc *guc)
-{
- struct drm_i915_private *dev_priv = guc_to_i915(guc);
- struct intel_uc_fw *guc_fw = &guc->fw;
- unsigned long offset;
-
- /*
- * The header plus uCode will be copied to WOPCM via DMA, excluding any
- * other components
- */
- I915_WRITE(DMA_COPY_SIZE, guc_fw->header_size + guc_fw->ucode_size);
-
- /* Set the source address for the new blob */
- offset = intel_uc_fw_ggtt_offset(guc_fw) + guc_fw->header_offset;
- I915_WRITE(DMA_ADDR_0_LOW, lower_32_bits(offset));
- I915_WRITE(DMA_ADDR_0_HIGH, upper_32_bits(offset) & 0xFFFF);
-
- /*
- * Set the DMA destination. Current uCode expects the code to be
- * loaded at 8k; locations below this are used for the stack.
- */
- I915_WRITE(DMA_ADDR_1_LOW, 0x2000);
- I915_WRITE(DMA_ADDR_1_HIGH, DMA_ADDRESS_SPACE_WOPCM);
-
- /* Finally start the DMA */
- I915_WRITE(DMA_CTRL, _MASKED_BIT_ENABLE(UOS_MOVE | START_DMA));
-
- return guc_wait_ucode(guc);
-}
-/*
- * Load the GuC firmware blob into the MinuteIA.
- */
-static int guc_fw_xfer(struct intel_uc_fw *guc_fw)
-{
- struct intel_guc *guc = container_of(guc_fw, struct intel_guc, fw);
- struct drm_i915_private *dev_priv = guc_to_i915(guc);
- int ret;
-
- GEM_BUG_ON(guc_fw->type != INTEL_UC_FW_TYPE_GUC);
-
- intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
-
- guc_prepare_xfer(guc);
-
- /*
- * Note that GuC needs the CSS header plus uKernel code to be copied
- * by the DMA engine in one operation, whereas the RSA signature is
- * loaded via MMIO.
- */
- guc_xfer_rsa(guc);
-
- ret = guc_xfer_ucode(guc);
-
- intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
-
- return ret;
-}
-
-/**
- * intel_guc_fw_upload() - load GuC uCode to device
- * @guc: intel_guc structure
- *
- * Called from intel_uc_init_hw() during driver load, resume from sleep and
- * after a GPU reset.
- *
- * The firmware image should have already been fetched into memory, so only
- * check that fetch succeeded, and then transfer the image to the h/w.
- *
- * Return: non-zero code on error
- */
-int intel_guc_fw_upload(struct intel_guc *guc)
-{
- return intel_uc_fw_upload(&guc->fw, guc_fw_xfer);
-}
diff --git a/drivers/gpu/drm/i915/intel_guc_fw.h b/drivers/gpu/drm/i915/intel_guc_fw.h
deleted file mode 100644
index 4ec5d3d9e2b0..000000000000
--- a/drivers/gpu/drm/i915/intel_guc_fw.h
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- * Copyright © 2017 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
- */
-
-#ifndef _INTEL_GUC_FW_H_
-#define _INTEL_GUC_FW_H_
-
-struct intel_guc;
-
-void intel_guc_fw_init_early(struct intel_guc *guc);
-int intel_guc_fw_upload(struct intel_guc *guc);
-
-#endif
diff --git a/drivers/gpu/drm/i915/intel_gvt.c b/drivers/gpu/drm/i915/intel_gvt.c
index 1d7d26e4cf14..2b6c016387c2 100644
--- a/drivers/gpu/drm/i915/intel_gvt.c
+++ b/drivers/gpu/drm/i915/intel_gvt.c
@@ -95,7 +95,7 @@ int intel_gvt_init(struct drm_i915_private *dev_priv)
{
int ret;
- if (i915_inject_load_failure())
+ if (i915_inject_probe_failure(dev_priv))
return -ENODEV;
if (!i915_modparams.enable_gvt) {
@@ -122,13 +122,14 @@ bail:
}
/**
- * intel_gvt_cleanup - cleanup GVT components when i915 driver is unloading
+ * intel_gvt_driver_remove - cleanup GVT components when i915 driver is
+ * unbinding
* @dev_priv: drm i915 private *
*
* This function is called at the i915 driver unloading stage, to shut down
* GVT components and release the related resources.
*/
-void intel_gvt_cleanup(struct drm_i915_private *dev_priv)
+void intel_gvt_driver_remove(struct drm_i915_private *dev_priv)
{
if (!intel_gvt_active(dev_priv))
return;
diff --git a/drivers/gpu/drm/i915/intel_gvt.h b/drivers/gpu/drm/i915/intel_gvt.h
index 61b246470282..502fad8a8652 100644
--- a/drivers/gpu/drm/i915/intel_gvt.h
+++ b/drivers/gpu/drm/i915/intel_gvt.h
@@ -24,11 +24,11 @@
#ifndef _INTEL_GVT_H_
#define _INTEL_GVT_H_
-struct intel_gvt;
+struct drm_i915_private;
#ifdef CONFIG_DRM_I915_GVT
int intel_gvt_init(struct drm_i915_private *dev_priv);
-void intel_gvt_cleanup(struct drm_i915_private *dev_priv);
+void intel_gvt_driver_remove(struct drm_i915_private *dev_priv);
int intel_gvt_init_device(struct drm_i915_private *dev_priv);
void intel_gvt_clean_device(struct drm_i915_private *dev_priv);
int intel_gvt_init_host(void);
@@ -38,7 +38,8 @@ static inline int intel_gvt_init(struct drm_i915_private *dev_priv)
{
return 0;
}
-static inline void intel_gvt_cleanup(struct drm_i915_private *dev_priv)
+
+static inline void intel_gvt_driver_remove(struct drm_i915_private *dev_priv)
{
}
diff --git a/drivers/gpu/drm/i915/intel_huc.h b/drivers/gpu/drm/i915/intel_huc.h
deleted file mode 100644
index 2a6c94e79f17..000000000000
--- a/drivers/gpu/drm/i915/intel_huc.h
+++ /dev/null
@@ -1,65 +0,0 @@
-/*
- * Copyright © 2014-2017 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
- */
-
-#ifndef _INTEL_HUC_H_
-#define _INTEL_HUC_H_
-
-#include "i915_reg.h"
-#include "intel_uc_fw.h"
-#include "intel_huc_fw.h"
-
-struct intel_huc {
- /* Generic uC firmware management */
- struct intel_uc_fw fw;
-
- /* HuC-specific additions */
- struct i915_vma *rsa_data;
- void *rsa_data_vaddr;
-
- struct {
- i915_reg_t reg;
- u32 mask;
- u32 value;
- } status;
-};
-
-void intel_huc_init_early(struct intel_huc *huc);
-int intel_huc_init_misc(struct intel_huc *huc);
-int intel_huc_init(struct intel_huc *huc);
-void intel_huc_fini(struct intel_huc *huc);
-int intel_huc_auth(struct intel_huc *huc);
-int intel_huc_check_status(struct intel_huc *huc);
-
-static inline void intel_huc_fini_misc(struct intel_huc *huc)
-{
- intel_uc_fw_cleanup_fetch(&huc->fw);
-}
-
-static inline int intel_huc_sanitize(struct intel_huc *huc)
-{
- intel_uc_fw_sanitize(&huc->fw);
- return 0;
-}
-
-#endif
diff --git a/drivers/gpu/drm/i915/intel_huc_fw.c b/drivers/gpu/drm/i915/intel_huc_fw.c
deleted file mode 100644
index 05cbf8338f53..000000000000
--- a/drivers/gpu/drm/i915/intel_huc_fw.c
+++ /dev/null
@@ -1,215 +0,0 @@
-/*
- * SPDX-License-Identifier: MIT
- *
- * Copyright © 2014-2018 Intel Corporation
- */
-
-#include "intel_huc_fw.h"
-#include "i915_drv.h"
-
-/**
- * DOC: HuC Firmware
- *
- * Motivation:
- * GEN9 introduces a new dedicated firmware for usage in media HEVC (High
- * Efficiency Video Coding) operations. Userspace can use the firmware
- * capabilities by adding HuC specific commands to batch buffers.
- *
- * Implementation:
- * The same firmware loader is used as the GuC. However, the actual
- * loading to HW is deferred until GEM initialization is done.
- *
- * Note that HuC firmware loading must be done before GuC loading.
- */
-
-#define BXT_HUC_FW_MAJOR 01
-#define BXT_HUC_FW_MINOR 8
-#define BXT_BLD_NUM 2893
-
-#define SKL_HUC_FW_MAJOR 01
-#define SKL_HUC_FW_MINOR 07
-#define SKL_BLD_NUM 1398
-
-#define KBL_HUC_FW_MAJOR 02
-#define KBL_HUC_FW_MINOR 00
-#define KBL_BLD_NUM 1810
-
-#define GLK_HUC_FW_MAJOR 03
-#define GLK_HUC_FW_MINOR 01
-#define GLK_BLD_NUM 2893
-
-#define ICL_HUC_FW_MAJOR 8
-#define ICL_HUC_FW_MINOR 4
-#define ICL_BLD_NUM 3238
-
-#define HUC_FW_PATH(platform, major, minor, bld_num) \
- "i915/" __stringify(platform) "_huc_ver" __stringify(major) "_" \
- __stringify(minor) "_" __stringify(bld_num) ".bin"
-
-#define I915_SKL_HUC_UCODE HUC_FW_PATH(skl, SKL_HUC_FW_MAJOR, \
- SKL_HUC_FW_MINOR, SKL_BLD_NUM)
-MODULE_FIRMWARE(I915_SKL_HUC_UCODE);
-
-#define I915_BXT_HUC_UCODE HUC_FW_PATH(bxt, BXT_HUC_FW_MAJOR, \
- BXT_HUC_FW_MINOR, BXT_BLD_NUM)
-MODULE_FIRMWARE(I915_BXT_HUC_UCODE);
-
-#define I915_KBL_HUC_UCODE HUC_FW_PATH(kbl, KBL_HUC_FW_MAJOR, \
- KBL_HUC_FW_MINOR, KBL_BLD_NUM)
-MODULE_FIRMWARE(I915_KBL_HUC_UCODE);
-
-#define I915_GLK_HUC_UCODE HUC_FW_PATH(glk, GLK_HUC_FW_MAJOR, \
- GLK_HUC_FW_MINOR, GLK_BLD_NUM)
-MODULE_FIRMWARE(I915_GLK_HUC_UCODE);
-
-#define I915_ICL_HUC_UCODE HUC_FW_PATH(icl, ICL_HUC_FW_MAJOR, \
- ICL_HUC_FW_MINOR, ICL_BLD_NUM)
-MODULE_FIRMWARE(I915_ICL_HUC_UCODE);
-
-static void huc_fw_select(struct intel_uc_fw *huc_fw)
-{
- struct intel_huc *huc = container_of(huc_fw, struct intel_huc, fw);
- struct drm_i915_private *dev_priv = huc_to_i915(huc);
-
- GEM_BUG_ON(huc_fw->type != INTEL_UC_FW_TYPE_HUC);
-
- if (!HAS_HUC(dev_priv))
- return;
-
- if (i915_modparams.huc_firmware_path) {
- huc_fw->path = i915_modparams.huc_firmware_path;
- huc_fw->major_ver_wanted = 0;
- huc_fw->minor_ver_wanted = 0;
- } else if (IS_SKYLAKE(dev_priv)) {
- huc_fw->path = I915_SKL_HUC_UCODE;
- huc_fw->major_ver_wanted = SKL_HUC_FW_MAJOR;
- huc_fw->minor_ver_wanted = SKL_HUC_FW_MINOR;
- } else if (IS_BROXTON(dev_priv)) {
- huc_fw->path = I915_BXT_HUC_UCODE;
- huc_fw->major_ver_wanted = BXT_HUC_FW_MAJOR;
- huc_fw->minor_ver_wanted = BXT_HUC_FW_MINOR;
- } else if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv)) {
- huc_fw->path = I915_KBL_HUC_UCODE;
- huc_fw->major_ver_wanted = KBL_HUC_FW_MAJOR;
- huc_fw->minor_ver_wanted = KBL_HUC_FW_MINOR;
- } else if (IS_GEMINILAKE(dev_priv)) {
- huc_fw->path = I915_GLK_HUC_UCODE;
- huc_fw->major_ver_wanted = GLK_HUC_FW_MAJOR;
- huc_fw->minor_ver_wanted = GLK_HUC_FW_MINOR;
- } else if (IS_ICELAKE(dev_priv)) {
- huc_fw->path = I915_ICL_HUC_UCODE;
- huc_fw->major_ver_wanted = ICL_HUC_FW_MAJOR;
- huc_fw->minor_ver_wanted = ICL_HUC_FW_MINOR;
- }
-}
-
-/**
- * intel_huc_fw_init_early() - initializes HuC firmware struct
- * @huc: intel_huc struct
- *
- * On platforms with HuC selects firmware for uploading
- */
-void intel_huc_fw_init_early(struct intel_huc *huc)
-{
- struct intel_uc_fw *huc_fw = &huc->fw;
-
- intel_uc_fw_init_early(huc_fw, INTEL_UC_FW_TYPE_HUC);
- huc_fw_select(huc_fw);
-}
-
-static void huc_xfer_rsa(struct intel_huc *huc)
-{
- struct intel_uc_fw *fw = &huc->fw;
- struct sg_table *pages = fw->obj->mm.pages;
-
- /*
- * HuC firmware image is outside GuC accessible range.
- * Copy the RSA signature out of the image into
- * the perma-pinned region set aside for it
- */
- sg_pcopy_to_buffer(pages->sgl, pages->nents,
- huc->rsa_data_vaddr, fw->rsa_size,
- fw->rsa_offset);
-}
-
-static int huc_xfer_ucode(struct intel_huc *huc)
-{
- struct intel_uc_fw *huc_fw = &huc->fw;
- struct drm_i915_private *dev_priv = huc_to_i915(huc);
- struct intel_uncore *uncore = &dev_priv->uncore;
- unsigned long offset = 0;
- u32 size;
- int ret;
-
- GEM_BUG_ON(huc_fw->type != INTEL_UC_FW_TYPE_HUC);
-
- intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
-
- /* Set the source address for the uCode */
- offset = intel_uc_fw_ggtt_offset(huc_fw) +
- huc_fw->header_offset;
- intel_uncore_write(uncore, DMA_ADDR_0_LOW,
- lower_32_bits(offset));
- intel_uncore_write(uncore, DMA_ADDR_0_HIGH,
- upper_32_bits(offset) & 0xFFFF);
-
- /*
- * Hardware doesn't look at destination address for HuC. Set it to 0,
- * but still program the correct address space.
- */
- intel_uncore_write(uncore, DMA_ADDR_1_LOW, 0);
- intel_uncore_write(uncore, DMA_ADDR_1_HIGH, DMA_ADDRESS_SPACE_WOPCM);
-
- size = huc_fw->header_size + huc_fw->ucode_size;
- intel_uncore_write(uncore, DMA_COPY_SIZE, size);
-
- /* Start the DMA */
- intel_uncore_write(uncore, DMA_CTRL,
- _MASKED_BIT_ENABLE(HUC_UKERNEL | START_DMA));
-
- /* Wait for DMA to finish */
- ret = intel_wait_for_register_fw(uncore, DMA_CTRL, START_DMA, 0, 100);
-
- DRM_DEBUG_DRIVER("HuC DMA transfer wait over with ret %d\n", ret);
-
- /* Disable the bits once DMA is over */
- intel_uncore_write(uncore, DMA_CTRL, _MASKED_BIT_DISABLE(HUC_UKERNEL));
-
- intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
-
- return ret;
-}
-
-/**
- * huc_fw_xfer() - DMA's the firmware
- * @huc_fw: the firmware descriptor
- *
- * Transfer the firmware image to RAM for execution by the microcontroller.
- *
- * Return: 0 on success, non-zero on failure
- */
-static int huc_fw_xfer(struct intel_uc_fw *huc_fw)
-{
- struct intel_huc *huc = container_of(huc_fw, struct intel_huc, fw);
-
- huc_xfer_rsa(huc);
-
- return huc_xfer_ucode(huc);
-}
-
-/**
- * intel_huc_fw_upload() - load HuC uCode to device
- * @huc: intel_huc structure
- *
- * Called from intel_uc_init_hw() during driver load, resume from sleep and
- * after a GPU reset. Note that HuC must be loaded before GuC.
- *
- * The firmware image should have already been fetched into memory, so only
- * check that fetch succeeded, and then transfer the image to the h/w.
- *
- * Return: non-zero code on error
- */
-int intel_huc_fw_upload(struct intel_huc *huc)
-{
- return intel_uc_fw_upload(&huc->fw, huc_fw_xfer);
-}
diff --git a/drivers/gpu/drm/i915/intel_pch.c b/drivers/gpu/drm/i915/intel_pch.c
new file mode 100644
index 000000000000..fa864d8f2b73
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_pch.c
@@ -0,0 +1,201 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2019 Intel Corporation.
+ */
+
+#include "i915_drv.h"
+#include "intel_pch.h"
+
+/* Map PCH device id to PCH type, or PCH_NONE if unknown. */
+static enum intel_pch
+intel_pch_type(const struct drm_i915_private *dev_priv, unsigned short id)
+{
+ switch (id) {
+ case INTEL_PCH_IBX_DEVICE_ID_TYPE:
+ DRM_DEBUG_KMS("Found Ibex Peak PCH\n");
+ WARN_ON(!IS_GEN(dev_priv, 5));
+ return PCH_IBX;
+ case INTEL_PCH_CPT_DEVICE_ID_TYPE:
+ DRM_DEBUG_KMS("Found CougarPoint PCH\n");
+ WARN_ON(!IS_GEN(dev_priv, 6) && !IS_IVYBRIDGE(dev_priv));
+ return PCH_CPT;
+ case INTEL_PCH_PPT_DEVICE_ID_TYPE:
+ DRM_DEBUG_KMS("Found PantherPoint PCH\n");
+ WARN_ON(!IS_GEN(dev_priv, 6) && !IS_IVYBRIDGE(dev_priv));
+ /* PantherPoint is CPT compatible */
+ return PCH_CPT;
+ case INTEL_PCH_LPT_DEVICE_ID_TYPE:
+ DRM_DEBUG_KMS("Found LynxPoint PCH\n");
+ WARN_ON(!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv));
+ WARN_ON(IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv));
+ return PCH_LPT;
+ case INTEL_PCH_LPT_LP_DEVICE_ID_TYPE:
+ DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
+ WARN_ON(!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv));
+ WARN_ON(!IS_HSW_ULT(dev_priv) && !IS_BDW_ULT(dev_priv));
+ return PCH_LPT;
+ case INTEL_PCH_WPT_DEVICE_ID_TYPE:
+ DRM_DEBUG_KMS("Found WildcatPoint PCH\n");
+ WARN_ON(!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv));
+ WARN_ON(IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv));
+ /* WildcatPoint is LPT compatible */
+ return PCH_LPT;
+ case INTEL_PCH_WPT_LP_DEVICE_ID_TYPE:
+ DRM_DEBUG_KMS("Found WildcatPoint LP PCH\n");
+ WARN_ON(!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv));
+ WARN_ON(!IS_HSW_ULT(dev_priv) && !IS_BDW_ULT(dev_priv));
+ /* WildcatPoint is LPT compatible */
+ return PCH_LPT;
+ case INTEL_PCH_SPT_DEVICE_ID_TYPE:
+ DRM_DEBUG_KMS("Found SunrisePoint PCH\n");
+ WARN_ON(!IS_SKYLAKE(dev_priv) && !IS_KABYLAKE(dev_priv));
+ return PCH_SPT;
+ case INTEL_PCH_SPT_LP_DEVICE_ID_TYPE:
+ DRM_DEBUG_KMS("Found SunrisePoint LP PCH\n");
+ WARN_ON(!IS_SKYLAKE(dev_priv) && !IS_KABYLAKE(dev_priv));
+ return PCH_SPT;
+ case INTEL_PCH_KBP_DEVICE_ID_TYPE:
+ DRM_DEBUG_KMS("Found Kaby Lake PCH (KBP)\n");
+ WARN_ON(!IS_SKYLAKE(dev_priv) && !IS_KABYLAKE(dev_priv) &&
+ !IS_COFFEELAKE(dev_priv));
+ /* KBP is SPT compatible */
+ return PCH_SPT;
+ case INTEL_PCH_CNP_DEVICE_ID_TYPE:
+ DRM_DEBUG_KMS("Found Cannon Lake PCH (CNP)\n");
+ WARN_ON(!IS_CANNONLAKE(dev_priv) && !IS_COFFEELAKE(dev_priv));
+ return PCH_CNP;
+ case INTEL_PCH_CNP_LP_DEVICE_ID_TYPE:
+ DRM_DEBUG_KMS("Found Cannon Lake LP PCH (CNP-LP)\n");
+ WARN_ON(!IS_CANNONLAKE(dev_priv) && !IS_COFFEELAKE(dev_priv));
+ return PCH_CNP;
+ case INTEL_PCH_CMP_DEVICE_ID_TYPE:
+ DRM_DEBUG_KMS("Found Comet Lake PCH (CMP)\n");
+ WARN_ON(!IS_COFFEELAKE(dev_priv));
+ /* CometPoint is CNP compatible */
+ return PCH_CNP;
+ case INTEL_PCH_ICP_DEVICE_ID_TYPE:
+ DRM_DEBUG_KMS("Found Ice Lake PCH\n");
+ WARN_ON(!IS_ICELAKE(dev_priv));
+ return PCH_ICP;
+ case INTEL_PCH_MCC_DEVICE_ID_TYPE:
+ case INTEL_PCH_MCC2_DEVICE_ID_TYPE:
+ DRM_DEBUG_KMS("Found Mule Creek Canyon PCH\n");
+ WARN_ON(!IS_ELKHARTLAKE(dev_priv));
+ return PCH_MCC;
+ case INTEL_PCH_TGP_DEVICE_ID_TYPE:
+ DRM_DEBUG_KMS("Found Tiger Lake LP PCH\n");
+ WARN_ON(!IS_TIGERLAKE(dev_priv));
+ return PCH_TGP;
+ default:
+ return PCH_NONE;
+ }
+}
+
+static bool intel_is_virt_pch(unsigned short id,
+ unsigned short svendor, unsigned short sdevice)
+{
+ return (id == INTEL_PCH_P2X_DEVICE_ID_TYPE ||
+ id == INTEL_PCH_P3X_DEVICE_ID_TYPE ||
+ (id == INTEL_PCH_QEMU_DEVICE_ID_TYPE &&
+ svendor == PCI_SUBVENDOR_ID_REDHAT_QUMRANET &&
+ sdevice == PCI_SUBDEVICE_ID_QEMU));
+}
+
+static unsigned short
+intel_virt_detect_pch(const struct drm_i915_private *dev_priv)
+{
+ unsigned short id = 0;
+
+ /*
+ * In a virtualized passthrough environment we can end up in a
+ * setup where the ISA bridge cannot be passed through. In this
+ * case, a south bridge may be emulated instead, and we have to
+ * make an educated guess as to which PCH is really there.
+ */
+
+ if (IS_TIGERLAKE(dev_priv))
+ id = INTEL_PCH_TGP_DEVICE_ID_TYPE;
+ else if (IS_ELKHARTLAKE(dev_priv))
+ id = INTEL_PCH_MCC_DEVICE_ID_TYPE;
+ else if (IS_ICELAKE(dev_priv))
+ id = INTEL_PCH_ICP_DEVICE_ID_TYPE;
+ else if (IS_CANNONLAKE(dev_priv) || IS_COFFEELAKE(dev_priv))
+ id = INTEL_PCH_CNP_DEVICE_ID_TYPE;
+ else if (IS_KABYLAKE(dev_priv) || IS_SKYLAKE(dev_priv))
+ id = INTEL_PCH_SPT_DEVICE_ID_TYPE;
+ else if (IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv))
+ id = INTEL_PCH_LPT_LP_DEVICE_ID_TYPE;
+ else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
+ id = INTEL_PCH_LPT_DEVICE_ID_TYPE;
+ else if (IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv))
+ id = INTEL_PCH_CPT_DEVICE_ID_TYPE;
+ else if (IS_GEN(dev_priv, 5))
+ id = INTEL_PCH_IBX_DEVICE_ID_TYPE;
+
+ if (id)
+ DRM_DEBUG_KMS("Assuming PCH ID %04x\n", id);
+ else
+ DRM_DEBUG_KMS("Assuming no PCH\n");
+
+ return id;
+}
+
+void intel_detect_pch(struct drm_i915_private *dev_priv)
+{
+ struct pci_dev *pch = NULL;
+
+ /*
+ * The reason to probe the ISA bridge instead of Dev31:Fun0 is to
+ * make graphics device passthrough easy for the VMM, which then only
+ * needs to expose an ISA bridge to let the driver know the real
+ * hardware underneath. This is a requirement from the virtualization team.
+ *
+ * In some virtualized environments (e.g. XEN), there is an irrelevant
+ * ISA bridge in the system. To work reliably, we should scan through
+ * all the ISA bridge devices and check for the first match, instead
+ * of only checking the first one.
+ */
+ while ((pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, pch))) {
+ unsigned short id;
+ enum intel_pch pch_type;
+
+ if (pch->vendor != PCI_VENDOR_ID_INTEL)
+ continue;
+
+ id = pch->device & INTEL_PCH_DEVICE_ID_MASK;
+
+ pch_type = intel_pch_type(dev_priv, id);
+ if (pch_type != PCH_NONE) {
+ dev_priv->pch_type = pch_type;
+ dev_priv->pch_id = id;
+ break;
+ } else if (intel_is_virt_pch(id, pch->subsystem_vendor,
+ pch->subsystem_device)) {
+ id = intel_virt_detect_pch(dev_priv);
+ pch_type = intel_pch_type(dev_priv, id);
+
+ /* Sanity check virtual PCH id */
+ if (WARN_ON(id && pch_type == PCH_NONE))
+ id = 0;
+
+ dev_priv->pch_type = pch_type;
+ dev_priv->pch_id = id;
+ break;
+ }
+ }
+
+ /*
+ * Use PCH_NOP (PCH but no South Display) for PCH platforms without
+ * display.
+ */
+ if (pch && !HAS_DISPLAY(dev_priv)) {
+ DRM_DEBUG_KMS("Display disabled, reverting to NOP PCH\n");
+ dev_priv->pch_type = PCH_NOP;
+ dev_priv->pch_id = 0;
+ }
+
+ if (!pch)
+ DRM_DEBUG_KMS("No PCH found.\n");
+
+ pci_dev_put(pch);
+}
diff --git a/drivers/gpu/drm/i915/intel_pch.h b/drivers/gpu/drm/i915/intel_pch.h
new file mode 100644
index 000000000000..e6a2d65f19c6
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_pch.h
@@ -0,0 +1,73 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2019 Intel Corporation.
+ */
+
+#ifndef __INTEL_PCH__
+#define __INTEL_PCH__
+
+struct drm_i915_private;
+
+/*
+ * Sorted by south display engine compatibility.
+ * If the new PCH comes with a south display engine that is not
+ * inherited from the latest item, please do not add it to the
+ * end. Instead, add it right after its "parent" PCH.
+ */
+enum intel_pch {
+ PCH_NOP = -1, /* PCH without south display */
+ PCH_NONE = 0, /* No PCH present */
+ PCH_IBX, /* Ibexpeak PCH */
+ PCH_CPT, /* Cougarpoint/Pantherpoint PCH */
+ PCH_LPT, /* Lynxpoint/Wildcatpoint PCH */
+ PCH_SPT, /* Sunrisepoint/Kaby Lake PCH */
+ PCH_CNP, /* Cannon/Comet Lake PCH */
+ PCH_ICP, /* Ice Lake PCH */
+ PCH_MCC, /* Mule Creek Canyon PCH */
+ PCH_TGP, /* Tiger Lake PCH */
+};
+
+#define INTEL_PCH_DEVICE_ID_MASK 0xff80
+#define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00
+#define INTEL_PCH_CPT_DEVICE_ID_TYPE 0x1c00
+#define INTEL_PCH_PPT_DEVICE_ID_TYPE 0x1e00
+#define INTEL_PCH_LPT_DEVICE_ID_TYPE 0x8c00
+#define INTEL_PCH_LPT_LP_DEVICE_ID_TYPE 0x9c00
+#define INTEL_PCH_WPT_DEVICE_ID_TYPE 0x8c80
+#define INTEL_PCH_WPT_LP_DEVICE_ID_TYPE 0x9c80
+#define INTEL_PCH_SPT_DEVICE_ID_TYPE 0xA100
+#define INTEL_PCH_SPT_LP_DEVICE_ID_TYPE 0x9D00
+#define INTEL_PCH_KBP_DEVICE_ID_TYPE 0xA280
+#define INTEL_PCH_CNP_DEVICE_ID_TYPE 0xA300
+#define INTEL_PCH_CNP_LP_DEVICE_ID_TYPE 0x9D80
+#define INTEL_PCH_CMP_DEVICE_ID_TYPE 0x0280
+#define INTEL_PCH_ICP_DEVICE_ID_TYPE 0x3480
+#define INTEL_PCH_MCC_DEVICE_ID_TYPE 0x4B00
+#define INTEL_PCH_MCC2_DEVICE_ID_TYPE 0x3880
+#define INTEL_PCH_TGP_DEVICE_ID_TYPE 0xA080
+#define INTEL_PCH_P2X_DEVICE_ID_TYPE 0x7100
+#define INTEL_PCH_P3X_DEVICE_ID_TYPE 0x7000
+#define INTEL_PCH_QEMU_DEVICE_ID_TYPE 0x2900 /* qemu q35 has 2918 */
+
+#define INTEL_PCH_TYPE(dev_priv) ((dev_priv)->pch_type)
+#define INTEL_PCH_ID(dev_priv) ((dev_priv)->pch_id)
+#define HAS_PCH_MCC(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_MCC)
+#define HAS_PCH_TGP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_TGP)
+#define HAS_PCH_ICP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_ICP)
+#define HAS_PCH_CNP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_CNP)
+#define HAS_PCH_SPT(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_SPT)
+#define HAS_PCH_LPT(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_LPT)
+#define HAS_PCH_LPT_LP(dev_priv) \
+ (INTEL_PCH_ID(dev_priv) == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE || \
+ INTEL_PCH_ID(dev_priv) == INTEL_PCH_WPT_LP_DEVICE_ID_TYPE)
+#define HAS_PCH_LPT_H(dev_priv) \
+ (INTEL_PCH_ID(dev_priv) == INTEL_PCH_LPT_DEVICE_ID_TYPE || \
+ INTEL_PCH_ID(dev_priv) == INTEL_PCH_WPT_DEVICE_ID_TYPE)
+#define HAS_PCH_CPT(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_CPT)
+#define HAS_PCH_IBX(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_IBX)
+#define HAS_PCH_NOP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_NOP)
+#define HAS_PCH_SPLIT(dev_priv) (INTEL_PCH_TYPE(dev_priv) != PCH_NONE)
+
+void intel_detect_pch(struct drm_i915_private *dev_priv);
+
+#endif /* __INTEL_PCH__ */
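
To see how the constants above fit together, here is a minimal user-space sketch of the classification that intel_pch_type() performs: mask the ISA bridge's PCI device id with INTEL_PCH_DEVICE_ID_MASK and switch on the result. Only three ids are carried over from the header, and 0x34b3 is a made-up SKU number used just to show the masking:

#include <stdio.h>

#define INTEL_PCH_DEVICE_ID_MASK	0xff80
#define INTEL_PCH_ICP_DEVICE_ID_TYPE	0x3480
#define INTEL_PCH_CNP_DEVICE_ID_TYPE	0xA300
#define INTEL_PCH_TGP_DEVICE_ID_TYPE	0xA080

static const char *pch_name(unsigned short device)
{
	switch (device & INTEL_PCH_DEVICE_ID_MASK) {
	case INTEL_PCH_ICP_DEVICE_ID_TYPE:
		return "Ice Lake PCH";
	case INTEL_PCH_CNP_DEVICE_ID_TYPE:
		return "Cannon Lake PCH (CNP)";
	case INTEL_PCH_TGP_DEVICE_ID_TYPE:
		return "Tiger Lake LP PCH";
	default:
		return "unknown (PCH_NONE)";
	}
}

int main(void)
{
	printf("%04x -> %s\n", 0x34b3, pch_name(0x34b3)); /* Ice Lake PCH */
	printf("%04x -> %s\n", 0x1234, pch_name(0x1234)); /* unknown */
	return 0;
}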
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index d9a7a13ce32a..75ee027abb80 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -34,12 +34,13 @@
#include <drm/drm_plane_helper.h>
#include "display/intel_atomic.h"
+#include "display/intel_display_types.h"
#include "display/intel_fbc.h"
#include "display/intel_sprite.h"
#include "i915_drv.h"
#include "i915_irq.h"
-#include "intel_drv.h"
+#include "i915_trace.h"
#include "intel_pm.h"
#include "intel_sideband.h"
#include "../../../platform/x86/intel_ips.h"
@@ -1116,6 +1117,8 @@ static u16 g4x_compute_wm(const struct intel_crtc_state *crtc_state,
if (!intel_wm_plane_visible(crtc_state, plane_state))
return 0;
+ cpp = plane_state->base.fb->format->cpp[0];
+
/*
* Not 100% sure which way ELK should go here as the
* spec only says CL/CTG should assume 32bpp and BW
@@ -1129,9 +1132,7 @@ static u16 g4x_compute_wm(const struct intel_crtc_state *crtc_state,
*/
if (IS_GM45(dev_priv) && plane->id == PLANE_PRIMARY &&
level != G4X_WM_LEVEL_NORMAL)
- cpp = 4;
- else
- cpp = plane_state->base.fb->format->cpp[0];
+ cpp = max(cpp, 4u);
clock = adjusted_mode->crtc_clock;
htotal = adjusted_mode->crtc_htotal;
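
The net effect of the hunk above is that the GM45 special case becomes a floor rather than an override: 64bpp formats (8 bytes per pixel) are no longer undercounted as 4 bytes at non-NORMAL levels. A toy comparison of the two behaviours:

#include <stdio.h>

/* Old behaviour: cpp forced to 4 for GM45 primary, non-NORMAL levels. */
static unsigned int old_cpp(unsigned int fb_cpp)
{
	(void)fb_cpp;
	return 4;
}

/* New behaviour: cpp = max(cpp, 4u), a floor instead of an override. */
static unsigned int new_cpp(unsigned int fb_cpp)
{
	return fb_cpp > 4 ? fb_cpp : 4;
}

int main(void)
{
	/* A 64bpp framebuffer: 8 bytes per pixel. */
	printf("old: %u, new: %u\n", old_cpp(8), new_cpp(8)); /* old: 4, new: 8 */
	return 0;
}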
@@ -1198,8 +1199,8 @@ static bool g4x_raw_fbc_wm_set(struct intel_crtc_state *crtc_state,
return dirty;
}
-static u32 ilk_compute_fbc_wm(const struct intel_crtc_state *cstate,
- const struct intel_plane_state *pstate,
+static u32 ilk_compute_fbc_wm(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state,
u32 pri_val);
static bool g4x_raw_plane_wm_compute(struct intel_crtc_state *crtc_state,
@@ -1566,13 +1567,13 @@ static void g4x_optimize_watermarks(struct intel_atomic_state *state,
struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
if (!crtc_state->wm.need_postvbl_update)
return;
mutex_lock(&dev_priv->wm.wm_mutex);
- intel_crtc->wm.active.g4x = crtc_state->wm.g4x.optimal;
+ crtc->wm.active.g4x = crtc_state->wm.g4x.optimal;
g4x_program_watermarks(dev_priv);
mutex_unlock(&dev_priv->wm.wm_mutex);
}
@@ -2185,13 +2186,13 @@ static void vlv_optimize_watermarks(struct intel_atomic_state *state,
struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
if (!crtc_state->wm.need_postvbl_update)
return;
mutex_lock(&dev_priv->wm.wm_mutex);
- intel_crtc->wm.active.vlv = crtc_state->wm.vlv.optimal;
+ crtc->wm.active.vlv = crtc_state->wm.vlv.optimal;
vlv_program_watermarks(dev_priv);
mutex_unlock(&dev_priv->wm.wm_mutex);
}
@@ -2493,8 +2494,8 @@ struct ilk_wm_maximums {
* For both WM_PIPE and WM_LP.
* mem_value must be in 0.1us units.
*/
-static u32 ilk_compute_pri_wm(const struct intel_crtc_state *cstate,
- const struct intel_plane_state *pstate,
+static u32 ilk_compute_pri_wm(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state,
u32 mem_value, bool is_lp)
{
u32 method1, method2;
@@ -2503,19 +2504,19 @@ static u32 ilk_compute_pri_wm(const struct intel_crtc_state *cstate,
if (mem_value == 0)
return U32_MAX;
- if (!intel_wm_plane_visible(cstate, pstate))
+ if (!intel_wm_plane_visible(crtc_state, plane_state))
return 0;
- cpp = pstate->base.fb->format->cpp[0];
+ cpp = plane_state->base.fb->format->cpp[0];
- method1 = ilk_wm_method1(cstate->pixel_rate, cpp, mem_value);
+ method1 = ilk_wm_method1(crtc_state->pixel_rate, cpp, mem_value);
if (!is_lp)
return method1;
- method2 = ilk_wm_method2(cstate->pixel_rate,
- cstate->base.adjusted_mode.crtc_htotal,
- drm_rect_width(&pstate->base.dst),
+ method2 = ilk_wm_method2(crtc_state->pixel_rate,
+ crtc_state->base.adjusted_mode.crtc_htotal,
+ drm_rect_width(&plane_state->base.dst),
cpp, mem_value);
return min(method1, method2);
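
The selection logic above (method1 for pipe levels, min(method1, method2) for LP levels, with mem_value in 0.1us units per the comment) can be sketched as below. The formula bodies are simplified placeholders, not the bspec math; only the control flow mirrors ilk_compute_pri_wm():

#include <stdint.h>
#include <stdio.h>

/* Placeholder: bytes fetched during the memory latency window. */
static uint32_t wm_method1(uint32_t pixel_rate_khz, int cpp, uint32_t latency_01us)
{
	return (uint64_t)pixel_rate_khz * cpp * latency_01us / 10000000 + 2;
}

/* Placeholder: line-based estimate using the horizontal total. */
static uint32_t wm_method2(uint32_t pixel_rate_khz, uint32_t htotal,
			   uint32_t width, int cpp, uint32_t latency_01us)
{
	uint32_t lines = ((uint64_t)latency_01us * pixel_rate_khz / 10000 +
			  htotal - 1) / htotal;

	return lines * width * cpp / 64 + 2;
}

static uint32_t compute_pri_wm(uint32_t pixel_rate_khz, uint32_t htotal,
			       uint32_t width, int cpp, uint32_t mem_value,
			       int is_lp, int visible)
{
	uint32_t method1, method2;

	if (mem_value == 0)
		return UINT32_MAX; /* disabled level, as above */
	if (!visible)
		return 0;

	method1 = wm_method1(pixel_rate_khz, cpp, mem_value);
	if (!is_lp)
		return method1;

	method2 = wm_method2(pixel_rate_khz, htotal, width, cpp, mem_value);
	return method1 < method2 ? method1 : method2;
}

int main(void)
{
	/* 1080p-ish pipe, 4 bytes per pixel, 5us (50 x 0.1us) LP latency. */
	printf("LP wm: %u blocks\n",
	       compute_pri_wm(148500, 2200, 1920, 4, 50, 1, 1));
	return 0;
}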
@@ -2525,8 +2526,8 @@ static u32 ilk_compute_pri_wm(const struct intel_crtc_state *cstate,
* For both WM_PIPE and WM_LP.
* mem_value must be in 0.1us units.
*/
-static u32 ilk_compute_spr_wm(const struct intel_crtc_state *cstate,
- const struct intel_plane_state *pstate,
+static u32 ilk_compute_spr_wm(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state,
u32 mem_value)
{
u32 method1, method2;
@@ -2535,15 +2536,15 @@ static u32 ilk_compute_spr_wm(const struct intel_crtc_state *cstate,
if (mem_value == 0)
return U32_MAX;
- if (!intel_wm_plane_visible(cstate, pstate))
+ if (!intel_wm_plane_visible(crtc_state, plane_state))
return 0;
- cpp = pstate->base.fb->format->cpp[0];
+ cpp = plane_state->base.fb->format->cpp[0];
- method1 = ilk_wm_method1(cstate->pixel_rate, cpp, mem_value);
- method2 = ilk_wm_method2(cstate->pixel_rate,
- cstate->base.adjusted_mode.crtc_htotal,
- drm_rect_width(&pstate->base.dst),
+ method1 = ilk_wm_method1(crtc_state->pixel_rate, cpp, mem_value);
+ method2 = ilk_wm_method2(crtc_state->pixel_rate,
+ crtc_state->base.adjusted_mode.crtc_htotal,
+ drm_rect_width(&plane_state->base.dst),
cpp, mem_value);
return min(method1, method2);
}
@@ -2552,8 +2553,8 @@ static u32 ilk_compute_spr_wm(const struct intel_crtc_state *cstate,
* For both WM_PIPE and WM_LP.
* mem_value must be in 0.1us units.
*/
-static u32 ilk_compute_cur_wm(const struct intel_crtc_state *cstate,
- const struct intel_plane_state *pstate,
+static u32 ilk_compute_cur_wm(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state,
u32 mem_value)
{
int cpp;
@@ -2561,29 +2562,29 @@ static u32 ilk_compute_cur_wm(const struct intel_crtc_state *cstate,
if (mem_value == 0)
return U32_MAX;
- if (!intel_wm_plane_visible(cstate, pstate))
+ if (!intel_wm_plane_visible(crtc_state, plane_state))
return 0;
- cpp = pstate->base.fb->format->cpp[0];
+ cpp = plane_state->base.fb->format->cpp[0];
- return ilk_wm_method2(cstate->pixel_rate,
- cstate->base.adjusted_mode.crtc_htotal,
- pstate->base.crtc_w, cpp, mem_value);
+ return ilk_wm_method2(crtc_state->pixel_rate,
+ crtc_state->base.adjusted_mode.crtc_htotal,
+ plane_state->base.crtc_w, cpp, mem_value);
}
/* Only for WM_LP. */
-static u32 ilk_compute_fbc_wm(const struct intel_crtc_state *cstate,
- const struct intel_plane_state *pstate,
+static u32 ilk_compute_fbc_wm(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state,
u32 pri_val)
{
int cpp;
- if (!intel_wm_plane_visible(cstate, pstate))
+ if (!intel_wm_plane_visible(crtc_state, plane_state))
return 0;
- cpp = pstate->base.fb->format->cpp[0];
+ cpp = plane_state->base.fb->format->cpp[0];
- return ilk_wm_fbc(pri_val, drm_rect_width(&pstate->base.dst), cpp);
+ return ilk_wm_fbc(pri_val, drm_rect_width(&plane_state->base.dst), cpp);
}
static unsigned int
@@ -2752,7 +2753,7 @@ static bool ilk_validate_wm_level(int level,
static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv,
const struct intel_crtc *intel_crtc,
int level,
- struct intel_crtc_state *cstate,
+ struct intel_crtc_state *crtc_state,
const struct intel_plane_state *pristate,
const struct intel_plane_state *sprstate,
const struct intel_plane_state *curstate,
@@ -2770,30 +2771,30 @@ static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv,
}
if (pristate) {
- result->pri_val = ilk_compute_pri_wm(cstate, pristate,
+ result->pri_val = ilk_compute_pri_wm(crtc_state, pristate,
pri_latency, level);
- result->fbc_val = ilk_compute_fbc_wm(cstate, pristate, result->pri_val);
+ result->fbc_val = ilk_compute_fbc_wm(crtc_state, pristate, result->pri_val);
}
if (sprstate)
- result->spr_val = ilk_compute_spr_wm(cstate, sprstate, spr_latency);
+ result->spr_val = ilk_compute_spr_wm(crtc_state, sprstate, spr_latency);
if (curstate)
- result->cur_val = ilk_compute_cur_wm(cstate, curstate, cur_latency);
+ result->cur_val = ilk_compute_cur_wm(crtc_state, curstate, cur_latency);
result->enable = true;
}
static u32
-hsw_compute_linetime_wm(const struct intel_crtc_state *cstate)
+hsw_compute_linetime_wm(const struct intel_crtc_state *crtc_state)
{
const struct intel_atomic_state *intel_state =
- to_intel_atomic_state(cstate->base.state);
+ to_intel_atomic_state(crtc_state->base.state);
const struct drm_display_mode *adjusted_mode =
- &cstate->base.adjusted_mode;
+ &crtc_state->base.adjusted_mode;
u32 linetime, ips_linetime;
- if (!cstate->base.active)
+ if (!crtc_state->base.active)
return 0;
if (WARN_ON(adjusted_mode->crtc_clock == 0))
return 0;
@@ -3101,10 +3102,10 @@ static bool ilk_validate_pipe_wm(const struct drm_i915_private *dev_priv,
}
/* Compute new watermarks for the pipe */
-static int ilk_compute_pipe_wm(struct intel_crtc_state *cstate)
+static int ilk_compute_pipe_wm(struct intel_crtc_state *crtc_state)
{
- struct drm_atomic_state *state = cstate->base.state;
- struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
+ struct drm_atomic_state *state = crtc_state->base.state;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
struct intel_pipe_wm *pipe_wm;
struct drm_device *dev = state->dev;
const struct drm_i915_private *dev_priv = to_i915(dev);
@@ -3116,9 +3117,9 @@ static int ilk_compute_pipe_wm(struct intel_crtc_state *cstate)
int level, max_level = ilk_wm_max_level(dev_priv), usable_level;
struct ilk_wm_maximums max;
- pipe_wm = &cstate->wm.ilk.optimal;
+ pipe_wm = &crtc_state->wm.ilk.optimal;
- drm_atomic_crtc_state_for_each_plane_state(plane, plane_state, &cstate->base) {
+ drm_atomic_crtc_state_for_each_plane_state(plane, plane_state, &crtc_state->base) {
const struct intel_plane_state *ps = to_intel_plane_state(plane_state);
if (plane->type == DRM_PLANE_TYPE_PRIMARY)
@@ -3129,7 +3130,7 @@ static int ilk_compute_pipe_wm(struct intel_crtc_state *cstate)
curstate = ps;
}
- pipe_wm->pipe_enabled = cstate->base.active;
+ pipe_wm->pipe_enabled = crtc_state->base.active;
if (sprstate) {
pipe_wm->sprites_enabled = sprstate->base.visible;
pipe_wm->sprites_scaled = sprstate->base.visible &&
@@ -3148,11 +3149,11 @@ static int ilk_compute_pipe_wm(struct intel_crtc_state *cstate)
usable_level = 0;
memset(&pipe_wm->wm, 0, sizeof(pipe_wm->wm));
- ilk_compute_wm_level(dev_priv, intel_crtc, 0, cstate,
+ ilk_compute_wm_level(dev_priv, intel_crtc, 0, crtc_state,
pristate, sprstate, curstate, &pipe_wm->wm[0]);
if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
- pipe_wm->linetime = hsw_compute_linetime_wm(cstate);
+ pipe_wm->linetime = hsw_compute_linetime_wm(crtc_state);
if (!ilk_validate_pipe_wm(dev_priv, pipe_wm))
return -EINVAL;
@@ -3162,7 +3163,7 @@ static int ilk_compute_pipe_wm(struct intel_crtc_state *cstate)
for (level = 1; level <= usable_level; level++) {
struct intel_wm_level *wm = &pipe_wm->wm[level];
- ilk_compute_wm_level(dev_priv, intel_crtc, level, cstate,
+ ilk_compute_wm_level(dev_priv, intel_crtc, level, crtc_state,
pristate, sprstate, curstate, wm);
/*
@@ -3736,14 +3737,13 @@ intel_disable_sagv(struct drm_i915_private *dev_priv)
return 0;
}
-bool intel_can_enable_sagv(struct drm_atomic_state *state)
+bool intel_can_enable_sagv(struct intel_atomic_state *state)
{
- struct drm_device *dev = state->dev;
+ struct drm_device *dev = state->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
struct intel_crtc *crtc;
struct intel_plane *plane;
- struct intel_crtc_state *cstate;
+ struct intel_crtc_state *crtc_state;
enum pipe pipe;
int level, latency;
int sagv_block_time_us;
@@ -3761,27 +3761,27 @@ bool intel_can_enable_sagv(struct drm_atomic_state *state)
/*
* If there are no active CRTCs, no additional checks need be performed
*/
- if (hweight32(intel_state->active_crtcs) == 0)
+ if (hweight32(state->active_crtcs) == 0)
return true;
/*
* SKL+ workaround: bspec recommends we disable SAGV when we have
 * more than one pipe enabled
*/
- if (hweight32(intel_state->active_crtcs) > 1)
+ if (hweight32(state->active_crtcs) > 1)
return false;
/* Since we're now guaranteed to only have one active CRTC... */
- pipe = ffs(intel_state->active_crtcs) - 1;
+ pipe = ffs(state->active_crtcs) - 1;
crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
- cstate = to_intel_crtc_state(crtc->base.state);
+ crtc_state = to_intel_crtc_state(crtc->base.state);
if (crtc->base.state->adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
return false;
for_each_intel_plane_on_crtc(dev, crtc, plane) {
struct skl_plane_wm *wm =
- &cstate->wm.skl.optimal.planes[plane->id];
+ &crtc_state->wm.skl.optimal.planes[plane->id];
/* Skip this plane if it's not enabled */
if (!wm->wm[0].plane_en)
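
The checks above lean on two bitmask helpers: hweight32() (population count) to count active CRTCs, and ffs() to recover the pipe index once exactly one bit is known to be set. A user-space sketch with the compiler builtins standing in for the kernel helpers:

#include <stdio.h>

int main(void)
{
	unsigned int active_crtcs = 1u << 2; /* only pipe C active */

	if (__builtin_popcount(active_crtcs) == 0) {
		printf("no active CRTCs, nothing to check\n");
	} else if (__builtin_popcount(active_crtcs) > 1) {
		printf("more than one pipe: keep SAGV off\n");
	} else {
		/* ffs() is 1-based, hence the -1, as in the code above. */
		printf("single active pipe: %d\n",
		       __builtin_ffs(active_crtcs) - 1);
	}
	return 0;
}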
@@ -3812,7 +3812,7 @@ bool intel_can_enable_sagv(struct drm_atomic_state *state)
}
static u16 intel_get_ddb_size(struct drm_i915_private *dev_priv,
- const struct intel_crtc_state *cstate,
+ const struct intel_crtc_state *crtc_state,
const u64 total_data_rate,
const int num_active,
struct skl_ddb_allocation *ddb)
@@ -3826,7 +3826,7 @@ static u16 intel_get_ddb_size(struct drm_i915_private *dev_priv,
if (INTEL_GEN(dev_priv) < 11)
return ddb_size - 4; /* 4 blocks for bypass path allocation */
- adjusted_mode = &cstate->base.adjusted_mode;
+ adjusted_mode = &crtc_state->base.adjusted_mode;
total_data_bw = total_data_rate * drm_mode_vrefresh(adjusted_mode);
/*
@@ -3849,23 +3849,22 @@ static u16 intel_get_ddb_size(struct drm_i915_private *dev_priv,
static void
skl_ddb_get_pipe_allocation_limits(struct drm_i915_private *dev_priv,
- const struct intel_crtc_state *cstate,
+ const struct intel_crtc_state *crtc_state,
const u64 total_data_rate,
struct skl_ddb_allocation *ddb,
struct skl_ddb_entry *alloc, /* out */
int *num_active /* out */)
{
- struct drm_atomic_state *state = cstate->base.state;
+ struct drm_atomic_state *state = crtc_state->base.state;
struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
- struct drm_crtc *for_crtc = cstate->base.crtc;
- const struct drm_crtc_state *crtc_state;
- const struct drm_crtc *crtc;
+ struct drm_crtc *for_crtc = crtc_state->base.crtc;
+ const struct intel_crtc *crtc;
u32 pipe_width = 0, total_width = 0, width_before_pipe = 0;
enum pipe for_pipe = to_intel_crtc(for_crtc)->pipe;
u16 ddb_size;
u32 i;
- if (WARN_ON(!state) || !cstate->base.active) {
+ if (WARN_ON(!state) || !crtc_state->base.active) {
alloc->start = 0;
alloc->end = 0;
*num_active = hweight32(dev_priv->active_crtcs);
@@ -3877,7 +3876,7 @@ skl_ddb_get_pipe_allocation_limits(struct drm_i915_private *dev_priv,
else
*num_active = hweight32(dev_priv->active_crtcs);
- ddb_size = intel_get_ddb_size(dev_priv, cstate, total_data_rate,
+ ddb_size = intel_get_ddb_size(dev_priv, crtc_state, total_data_rate,
*num_active, ddb);
/*
@@ -3902,16 +3901,15 @@ skl_ddb_get_pipe_allocation_limits(struct drm_i915_private *dev_priv,
 * framebuffer, so instead of allocating DDB equally among pipes
* distribute DDB based on resolution/width of the display.
*/
- for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
- const struct drm_display_mode *adjusted_mode;
+ for_each_new_intel_crtc_in_state(intel_state, crtc, crtc_state, i) {
+ const struct drm_display_mode *adjusted_mode =
+ &crtc_state->base.adjusted_mode;
+ enum pipe pipe = crtc->pipe;
int hdisplay, vdisplay;
- enum pipe pipe;
- if (!crtc_state->enable)
+ if (!crtc_state->base.enable)
continue;
- pipe = to_intel_crtc(crtc)->pipe;
- adjusted_mode = &crtc_state->adjusted_mode;
drm_mode_get_hv_timing(adjusted_mode, &hdisplay, &vdisplay);
total_width += hdisplay;
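
The loop above only accumulates pipe widths; the split itself falls outside this hunk. A sketch of the proportional allocation those sums feed, using the natural formula implied by the comment (stated as an assumption, not quoted code):

#include <stdio.h>

struct ddb_entry {
	unsigned short start, end;
};

static struct ddb_entry ddb_split(unsigned short ddb_size,
				  unsigned int width_before_pipe,
				  unsigned int pipe_width,
				  unsigned int total_width)
{
	struct ddb_entry alloc;

	alloc.start = (unsigned long long)ddb_size * width_before_pipe /
		      total_width;
	alloc.end = (unsigned long long)ddb_size *
		    (width_before_pipe + pipe_width) / total_width;
	return alloc;
}

int main(void)
{
	/* Two pipes: 1920 columns before ours, ours 3840 wide, 896 blocks. */
	struct ddb_entry e = ddb_split(896, 1920, 3840, 5760);

	printf("pipe DDB: [%u, %u)\n", e.start, e.end); /* [298, 896) */
	return 0;
}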
@@ -3930,7 +3928,7 @@ static int skl_compute_wm_params(const struct intel_crtc_state *crtc_state,
u64 modifier, unsigned int rotation,
u32 plane_pixel_rate, struct skl_wm_params *wp,
int color_plane);
-static void skl_compute_plane_wm(const struct intel_crtc_state *cstate,
+static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state,
int level,
const struct skl_wm_params *wp,
const struct skl_wm_level *result_prev,
@@ -4062,15 +4060,15 @@ void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
* Caller should take care of dividing & rounding off the value.
*/
static uint_fixed_16_16_t
-skl_plane_downscale_amount(const struct intel_crtc_state *cstate,
- const struct intel_plane_state *pstate)
+skl_plane_downscale_amount(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
{
- struct intel_plane *plane = to_intel_plane(pstate->base.plane);
+ struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
u32 src_w, src_h, dst_w, dst_h;
uint_fixed_16_16_t fp_w_ratio, fp_h_ratio;
uint_fixed_16_16_t downscale_h, downscale_w;
- if (WARN_ON(!intel_wm_plane_visible(cstate, pstate)))
+ if (WARN_ON(!intel_wm_plane_visible(crtc_state, plane_state)))
return u32_to_fixed16(0);
/* n.b., src is 16.16 fixed point, dst is whole integer */
@@ -4079,20 +4077,20 @@ skl_plane_downscale_amount(const struct intel_crtc_state *cstate,
* Cursors only support 0/180 degree rotation,
* hence no need to account for rotation here.
*/
- src_w = pstate->base.src_w >> 16;
- src_h = pstate->base.src_h >> 16;
- dst_w = pstate->base.crtc_w;
- dst_h = pstate->base.crtc_h;
+ src_w = plane_state->base.src_w >> 16;
+ src_h = plane_state->base.src_h >> 16;
+ dst_w = plane_state->base.crtc_w;
+ dst_h = plane_state->base.crtc_h;
} else {
/*
* Src coordinates are already rotated by 270 degrees for
* the 90/270 degree plane rotation cases (to match the
* GTT mapping), hence no need to account for rotation here.
*/
- src_w = drm_rect_width(&pstate->base.src) >> 16;
- src_h = drm_rect_height(&pstate->base.src) >> 16;
- dst_w = drm_rect_width(&pstate->base.dst);
- dst_h = drm_rect_height(&pstate->base.dst);
+ src_w = drm_rect_width(&plane_state->base.src) >> 16;
+ src_h = drm_rect_height(&plane_state->base.src) >> 16;
+ dst_w = drm_rect_width(&plane_state->base.dst);
+ dst_h = drm_rect_height(&plane_state->base.dst);
}
fp_w_ratio = div_fixed16(src_w, dst_w);
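
The ratio above, like the rest of the downscale math, is done in uint_fixed_16_16_t arithmetic. A minimal stand-in for the helpers, assuming the usual 16.16 conventions (16 integer bits, 16 fractional bits) rather than quoting the i915 implementations:

#include <stdint.h>
#include <stdio.h>

typedef uint32_t fixed16; /* 16 integer bits, 16 fractional bits */

static fixed16 div_fixed16(uint32_t n, uint32_t d)
{
	return ((uint64_t)n << 16) / d;
}

static fixed16 mul_fixed16(fixed16 a, fixed16 b)
{
	return ((uint64_t)a * b) >> 16;
}

static uint32_t fixed16_to_u32_round_up(fixed16 v)
{
	return (v + 0xffff) >> 16;
}

int main(void)
{
	/* 4096x2160 source scanned out on a 1920x1080 plane. */
	fixed16 w_ratio = div_fixed16(4096, 1920); /* ~2.13 */
	fixed16 h_ratio = div_fixed16(2160, 1080); /* exactly 2.0 */
	fixed16 downscale = mul_fixed16(w_ratio, h_ratio);

	printf("downscale amount: %u (rounded up)\n",
	       fixed16_to_u32_round_up(downscale)); /* 5 */
	return 0;
}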
@@ -4137,49 +4135,46 @@ skl_pipe_downscale_amount(const struct intel_crtc_state *crtc_state)
}
int skl_check_pipe_max_pixel_rate(struct intel_crtc *intel_crtc,
- struct intel_crtc_state *cstate)
+ struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
- struct drm_crtc_state *crtc_state = &cstate->base;
- struct drm_atomic_state *state = crtc_state->state;
+ struct drm_atomic_state *state = crtc_state->base.state;
struct drm_plane *plane;
- const struct drm_plane_state *pstate;
- struct intel_plane_state *intel_pstate;
+ const struct drm_plane_state *drm_plane_state;
int crtc_clock, dotclk;
u32 pipe_max_pixel_rate;
uint_fixed_16_16_t pipe_downscale;
uint_fixed_16_16_t max_downscale = u32_to_fixed16(1);
- if (!cstate->base.enable)
+ if (!crtc_state->base.enable)
return 0;
- drm_atomic_crtc_state_for_each_plane_state(plane, pstate, crtc_state) {
+ drm_atomic_crtc_state_for_each_plane_state(plane, drm_plane_state, &crtc_state->base) {
uint_fixed_16_16_t plane_downscale;
uint_fixed_16_16_t fp_9_div_8 = div_fixed16(9, 8);
int bpp;
+ const struct intel_plane_state *plane_state =
+ to_intel_plane_state(drm_plane_state);
- if (!intel_wm_plane_visible(cstate,
- to_intel_plane_state(pstate)))
+ if (!intel_wm_plane_visible(crtc_state, plane_state))
continue;
- if (WARN_ON(!pstate->fb))
+ if (WARN_ON(!plane_state->base.fb))
return -EINVAL;
- intel_pstate = to_intel_plane_state(pstate);
- plane_downscale = skl_plane_downscale_amount(cstate,
- intel_pstate);
- bpp = pstate->fb->format->cpp[0] * 8;
+ plane_downscale = skl_plane_downscale_amount(crtc_state, plane_state);
+ bpp = plane_state->base.fb->format->cpp[0] * 8;
if (bpp == 64)
plane_downscale = mul_fixed16(plane_downscale,
fp_9_div_8);
max_downscale = max_fixed16(plane_downscale, max_downscale);
}
- pipe_downscale = skl_pipe_downscale_amount(cstate);
+ pipe_downscale = skl_pipe_downscale_amount(crtc_state);
pipe_downscale = mul_fixed16(pipe_downscale, max_downscale);
- crtc_clock = crtc_state->adjusted_mode.crtc_clock;
+ crtc_clock = crtc_state->base.adjusted_mode.crtc_clock;
dotclk = to_intel_atomic_state(state)->cdclk.logical.cdclk;
if (IS_GEMINILAKE(dev_priv) || INTEL_GEN(dev_priv) >= 10)
@@ -4196,12 +4191,11 @@ int skl_check_pipe_max_pixel_rate(struct intel_crtc *intel_crtc,
}
static u64
-skl_plane_relative_data_rate(const struct intel_crtc_state *cstate,
- const struct intel_plane_state *intel_pstate,
+skl_plane_relative_data_rate(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state,
const int plane)
{
- struct intel_plane *intel_plane =
- to_intel_plane(intel_pstate->base.plane);
+ struct intel_plane *intel_plane = to_intel_plane(plane_state->base.plane);
u32 data_rate;
u32 width = 0, height = 0;
struct drm_framebuffer *fb;
@@ -4209,10 +4203,10 @@ skl_plane_relative_data_rate(const struct intel_crtc_state *cstate,
uint_fixed_16_16_t down_scale_amount;
u64 rate;
- if (!intel_pstate->base.visible)
+ if (!plane_state->base.visible)
return 0;
- fb = intel_pstate->base.fb;
+ fb = plane_state->base.fb;
format = fb->format->format;
if (intel_plane->id == PLANE_CURSOR)
@@ -4225,8 +4219,8 @@ skl_plane_relative_data_rate(const struct intel_crtc_state *cstate,
* the 90/270 degree plane rotation cases (to match the
* GTT mapping), hence no need to account for rotation here.
*/
- width = drm_rect_width(&intel_pstate->base.src) >> 16;
- height = drm_rect_height(&intel_pstate->base.src) >> 16;
+ width = drm_rect_width(&plane_state->base.src) >> 16;
+ height = drm_rect_height(&plane_state->base.src) >> 16;
/* UV plane does 1/2 pixel sub-sampling */
if (plane == 1 && is_planar_yuv_format(format)) {
@@ -4236,7 +4230,7 @@ skl_plane_relative_data_rate(const struct intel_crtc_state *cstate,
data_rate = width * height;
- down_scale_amount = skl_plane_downscale_amount(cstate, intel_pstate);
+ down_scale_amount = skl_plane_downscale_amount(crtc_state, plane_state);
rate = mul_round_up_u32_fixed16(data_rate, down_scale_amount);
@@ -4245,35 +4239,32 @@ skl_plane_relative_data_rate(const struct intel_crtc_state *cstate,
}
static u64
-skl_get_total_relative_data_rate(struct intel_crtc_state *intel_cstate,
+skl_get_total_relative_data_rate(struct intel_crtc_state *crtc_state,
u64 *plane_data_rate,
u64 *uv_plane_data_rate)
{
- struct drm_crtc_state *cstate = &intel_cstate->base;
- struct drm_atomic_state *state = cstate->state;
+ struct drm_atomic_state *state = crtc_state->base.state;
struct drm_plane *plane;
- const struct drm_plane_state *pstate;
+ const struct drm_plane_state *drm_plane_state;
u64 total_data_rate = 0;
if (WARN_ON(!state))
return 0;
/* Calculate and cache data rate for each plane */
- drm_atomic_crtc_state_for_each_plane_state(plane, pstate, cstate) {
+ drm_atomic_crtc_state_for_each_plane_state(plane, drm_plane_state, &crtc_state->base) {
enum plane_id plane_id = to_intel_plane(plane)->id;
+ const struct intel_plane_state *plane_state =
+ to_intel_plane_state(drm_plane_state);
u64 rate;
- const struct intel_plane_state *intel_pstate =
- to_intel_plane_state(pstate);
/* packed/y */
- rate = skl_plane_relative_data_rate(intel_cstate,
- intel_pstate, 0);
+ rate = skl_plane_relative_data_rate(crtc_state, plane_state, 0);
plane_data_rate[plane_id] = rate;
total_data_rate += rate;
/* uv-plane */
- rate = skl_plane_relative_data_rate(intel_cstate,
- intel_pstate, 1);
+ rate = skl_plane_relative_data_rate(crtc_state, plane_state, 1);
uv_plane_data_rate[plane_id] = rate;
total_data_rate += rate;
}
@@ -4282,28 +4273,25 @@ skl_get_total_relative_data_rate(struct intel_crtc_state *intel_cstate,
}
static u64
-icl_get_total_relative_data_rate(struct intel_crtc_state *intel_cstate,
+icl_get_total_relative_data_rate(struct intel_crtc_state *crtc_state,
u64 *plane_data_rate)
{
- struct drm_crtc_state *cstate = &intel_cstate->base;
- struct drm_atomic_state *state = cstate->state;
struct drm_plane *plane;
- const struct drm_plane_state *pstate;
+ const struct drm_plane_state *drm_plane_state;
u64 total_data_rate = 0;
- if (WARN_ON(!state))
+ if (WARN_ON(!crtc_state->base.state))
return 0;
/* Calculate and cache data rate for each plane */
- drm_atomic_crtc_state_for_each_plane_state(plane, pstate, cstate) {
- const struct intel_plane_state *intel_pstate =
- to_intel_plane_state(pstate);
+ drm_atomic_crtc_state_for_each_plane_state(plane, drm_plane_state, &crtc_state->base) {
+ const struct intel_plane_state *plane_state =
+ to_intel_plane_state(drm_plane_state);
enum plane_id plane_id = to_intel_plane(plane)->id;
u64 rate;
- if (!intel_pstate->linked_plane) {
- rate = skl_plane_relative_data_rate(intel_cstate,
- intel_pstate, 0);
+ if (!plane_state->linked_plane) {
+ rate = skl_plane_relative_data_rate(crtc_state, plane_state, 0);
plane_data_rate[plane_id] = rate;
total_data_rate += rate;
} else {
@@ -4316,18 +4304,16 @@ icl_get_total_relative_data_rate(struct intel_crtc_state *intel_cstate,
* NULL if we try get_new_plane_state(), so we
* always calculate from the master.
*/
- if (intel_pstate->slave)
+ if (plane_state->slave)
continue;
/* Y plane rate is calculated on the slave */
- rate = skl_plane_relative_data_rate(intel_cstate,
- intel_pstate, 0);
- y_plane_id = intel_pstate->linked_plane->id;
+ rate = skl_plane_relative_data_rate(crtc_state, plane_state, 0);
+ y_plane_id = plane_state->linked_plane->id;
plane_data_rate[y_plane_id] = rate;
total_data_rate += rate;
- rate = skl_plane_relative_data_rate(intel_cstate,
- intel_pstate, 1);
+ rate = skl_plane_relative_data_rate(crtc_state, plane_state, 1);
plane_data_rate[plane_id] = rate;
total_data_rate += rate;
}
@@ -4337,14 +4323,14 @@ icl_get_total_relative_data_rate(struct intel_crtc_state *intel_cstate,
}
static int
-skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
+skl_allocate_pipe_ddb(struct intel_crtc_state *crtc_state,
struct skl_ddb_allocation *ddb /* out */)
{
- struct drm_atomic_state *state = cstate->base.state;
- struct drm_crtc *crtc = cstate->base.crtc;
+ struct drm_atomic_state *state = crtc_state->base.state;
+ struct drm_crtc *crtc = crtc_state->base.crtc;
struct drm_i915_private *dev_priv = to_i915(crtc->dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct skl_ddb_entry *alloc = &cstate->wm.skl.ddb;
+ struct skl_ddb_entry *alloc = &crtc_state->wm.skl.ddb;
u16 alloc_size, start = 0;
u16 total[I915_MAX_PLANES] = {};
u16 uv_total[I915_MAX_PLANES] = {};
@@ -4357,40 +4343,40 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
int level;
/* Clear the partitioning for disabled planes. */
- memset(cstate->wm.skl.plane_ddb_y, 0, sizeof(cstate->wm.skl.plane_ddb_y));
- memset(cstate->wm.skl.plane_ddb_uv, 0, sizeof(cstate->wm.skl.plane_ddb_uv));
+ memset(crtc_state->wm.skl.plane_ddb_y, 0, sizeof(crtc_state->wm.skl.plane_ddb_y));
+ memset(crtc_state->wm.skl.plane_ddb_uv, 0, sizeof(crtc_state->wm.skl.plane_ddb_uv));
if (WARN_ON(!state))
return 0;
- if (!cstate->base.active) {
+ if (!crtc_state->base.active) {
alloc->start = alloc->end = 0;
return 0;
}
if (INTEL_GEN(dev_priv) >= 11)
total_data_rate =
- icl_get_total_relative_data_rate(cstate,
+ icl_get_total_relative_data_rate(crtc_state,
plane_data_rate);
else
total_data_rate =
- skl_get_total_relative_data_rate(cstate,
+ skl_get_total_relative_data_rate(crtc_state,
plane_data_rate,
uv_plane_data_rate);
- skl_ddb_get_pipe_allocation_limits(dev_priv, cstate, total_data_rate,
+ skl_ddb_get_pipe_allocation_limits(dev_priv, crtc_state, total_data_rate,
ddb, alloc, &num_active);
alloc_size = skl_ddb_entry_size(alloc);
if (alloc_size == 0)
return 0;
/* Allocate fixed number of blocks for cursor. */
- total[PLANE_CURSOR] = skl_cursor_allocation(cstate, num_active);
+ total[PLANE_CURSOR] = skl_cursor_allocation(crtc_state, num_active);
alloc_size -= total[PLANE_CURSOR];
- cstate->wm.skl.plane_ddb_y[PLANE_CURSOR].start =
+ crtc_state->wm.skl.plane_ddb_y[PLANE_CURSOR].start =
alloc->end - total[PLANE_CURSOR];
- cstate->wm.skl.plane_ddb_y[PLANE_CURSOR].end = alloc->end;
+ crtc_state->wm.skl.plane_ddb_y[PLANE_CURSOR].end = alloc->end;
if (total_data_rate == 0)
return 0;
@@ -4403,7 +4389,7 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
blocks = 0;
for_each_plane_id_on_crtc(intel_crtc, plane_id) {
const struct skl_plane_wm *wm =
- &cstate->wm.skl.optimal.planes[plane_id];
+ &crtc_state->wm.skl.optimal.planes[plane_id];
if (plane_id == PLANE_CURSOR) {
if (WARN_ON(wm->wm[level].min_ddb_alloc >
@@ -4438,7 +4424,7 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
*/
for_each_plane_id_on_crtc(intel_crtc, plane_id) {
const struct skl_plane_wm *wm =
- &cstate->wm.skl.optimal.planes[plane_id];
+ &crtc_state->wm.skl.optimal.planes[plane_id];
u64 rate;
u16 extra;
@@ -4477,9 +4463,9 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
start = alloc->start;
for_each_plane_id_on_crtc(intel_crtc, plane_id) {
struct skl_ddb_entry *plane_alloc =
- &cstate->wm.skl.plane_ddb_y[plane_id];
+ &crtc_state->wm.skl.plane_ddb_y[plane_id];
struct skl_ddb_entry *uv_plane_alloc =
- &cstate->wm.skl.plane_ddb_uv[plane_id];
+ &crtc_state->wm.skl.plane_ddb_uv[plane_id];
if (plane_id == PLANE_CURSOR)
continue;
@@ -4510,7 +4496,7 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
for (level++; level <= ilk_wm_max_level(dev_priv); level++) {
for_each_plane_id_on_crtc(intel_crtc, plane_id) {
struct skl_plane_wm *wm =
- &cstate->wm.skl.optimal.planes[plane_id];
+ &crtc_state->wm.skl.optimal.planes[plane_id];
/*
* We only disable the watermarks for each plane if
@@ -4547,7 +4533,7 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
*/
for_each_plane_id_on_crtc(intel_crtc, plane_id) {
struct skl_plane_wm *wm =
- &cstate->wm.skl.optimal.planes[plane_id];
+ &crtc_state->wm.skl.optimal.planes[plane_id];
if (wm->trans_wm.plane_res_b >= total[plane_id])
memset(&wm->trans_wm, 0, sizeof(wm->trans_wm));
@@ -4599,43 +4585,43 @@ skl_wm_method2(u32 pixel_rate, u32 pipe_htotal, u32 latency,
}
static uint_fixed_16_16_t
-intel_get_linetime_us(const struct intel_crtc_state *cstate)
+intel_get_linetime_us(const struct intel_crtc_state *crtc_state)
{
u32 pixel_rate;
u32 crtc_htotal;
uint_fixed_16_16_t linetime_us;
- if (!cstate->base.active)
+ if (!crtc_state->base.active)
return u32_to_fixed16(0);
- pixel_rate = cstate->pixel_rate;
+ pixel_rate = crtc_state->pixel_rate;
if (WARN_ON(pixel_rate == 0))
return u32_to_fixed16(0);
- crtc_htotal = cstate->base.adjusted_mode.crtc_htotal;
+ crtc_htotal = crtc_state->base.adjusted_mode.crtc_htotal;
linetime_us = div_fixed16(crtc_htotal * 1000, pixel_rate);
return linetime_us;
}
static u32
-skl_adjusted_plane_pixel_rate(const struct intel_crtc_state *cstate,
- const struct intel_plane_state *pstate)
+skl_adjusted_plane_pixel_rate(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
{
u64 adjusted_pixel_rate;
uint_fixed_16_16_t downscale_amount;
/* Shouldn't reach here on disabled planes... */
- if (WARN_ON(!intel_wm_plane_visible(cstate, pstate)))
+ if (WARN_ON(!intel_wm_plane_visible(crtc_state, plane_state)))
return 0;
/*
* Adjusted plane pixel rate is just the pipe's adjusted pixel rate
* with additional adjustments for plane-specific scaling.
*/
- adjusted_pixel_rate = cstate->pixel_rate;
- downscale_amount = skl_plane_downscale_amount(cstate, pstate);
+ adjusted_pixel_rate = crtc_state->pixel_rate;
+ downscale_amount = skl_plane_downscale_amount(crtc_state, plane_state);
return mul_round_up_u32_fixed16(adjusted_pixel_rate,
downscale_amount);
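
A worked example of intel_get_linetime_us() above: with pixel_rate in kHz (an inference from the math, stated here as an assumption), htotal * 1000 / pixel_rate is the scanout time of one line in microseconds. Plain doubles instead of 16.16 fixed point:

#include <stdio.h>

int main(void)
{
	unsigned int crtc_htotal = 2200;  /* 1920x1080@60 CEA timing */
	unsigned int pixel_rate = 148500; /* kHz */
	double linetime_us = (double)crtc_htotal * 1000 / pixel_rate;

	printf("linetime: %.2f us\n", linetime_us); /* ~14.81 us */
	return 0;
}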
@@ -4768,13 +4754,13 @@ static bool skl_wm_has_lines(struct drm_i915_private *dev_priv, int level)
return level > 0;
}
-static void skl_compute_plane_wm(const struct intel_crtc_state *cstate,
+static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state,
int level,
const struct skl_wm_params *wp,
const struct skl_wm_level *result_prev,
struct skl_wm_level *result /* out */)
{
- struct drm_i915_private *dev_priv = to_i915(cstate->base.crtc->dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
u32 latency = dev_priv->wm.skl_latency[level];
uint_fixed_16_16_t method1, method2;
uint_fixed_16_16_t selected_result;
@@ -4800,14 +4786,14 @@ static void skl_compute_plane_wm(const struct intel_crtc_state *cstate,
method1 = skl_wm_method1(dev_priv, wp->plane_pixel_rate,
wp->cpp, latency, wp->dbuf_block_size);
method2 = skl_wm_method2(wp->plane_pixel_rate,
- cstate->base.adjusted_mode.crtc_htotal,
+ crtc_state->base.adjusted_mode.crtc_htotal,
latency,
wp->plane_blocks_per_line);
if (wp->y_tiled) {
selected_result = max_fixed16(method2, wp->y_tile_minimum);
} else {
- if ((wp->cpp * cstate->base.adjusted_mode.crtc_htotal /
+ if ((wp->cpp * crtc_state->base.adjusted_mode.crtc_htotal /
wp->dbuf_block_size < 1) &&
(wp->plane_bytes_per_line / wp->dbuf_block_size < 1)) {
selected_result = method2;
@@ -4894,18 +4880,18 @@ static void skl_compute_plane_wm(const struct intel_crtc_state *cstate,
}
static void
-skl_compute_wm_levels(const struct intel_crtc_state *cstate,
+skl_compute_wm_levels(const struct intel_crtc_state *crtc_state,
const struct skl_wm_params *wm_params,
struct skl_wm_level *levels)
{
- struct drm_i915_private *dev_priv = to_i915(cstate->base.crtc->dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
int level, max_level = ilk_wm_max_level(dev_priv);
struct skl_wm_level *result_prev = &levels[0];
for (level = 0; level <= max_level; level++) {
struct skl_wm_level *result = &levels[level];
- skl_compute_plane_wm(cstate, level, wm_params,
+ skl_compute_plane_wm(crtc_state, level, wm_params,
result_prev, result);
result_prev = result;
@@ -4913,14 +4899,14 @@ skl_compute_wm_levels(const struct intel_crtc_state *cstate,
}
static u32
-skl_compute_linetime_wm(const struct intel_crtc_state *cstate)
+skl_compute_linetime_wm(const struct intel_crtc_state *crtc_state)
{
- struct drm_atomic_state *state = cstate->base.state;
+ struct drm_atomic_state *state = crtc_state->base.state;
struct drm_i915_private *dev_priv = to_i915(state->dev);
uint_fixed_16_16_t linetime_us;
u32 linetime_wm;
- linetime_us = intel_get_linetime_us(cstate);
+ linetime_us = intel_get_linetime_us(crtc_state);
linetime_wm = fixed16_to_u32_round_up(mul_u32_fixed16(8, linetime_us));
/* Display WA #1135: BXT:ALL GLK:ALL */
@@ -4930,11 +4916,11 @@ skl_compute_linetime_wm(const struct intel_crtc_state *cstate)
return linetime_wm;
}
-static void skl_compute_transition_wm(const struct intel_crtc_state *cstate,
+static void skl_compute_transition_wm(const struct intel_crtc_state *crtc_state,
const struct skl_wm_params *wp,
struct skl_plane_wm *wm)
{
- struct drm_device *dev = cstate->base.crtc->dev;
+ struct drm_device *dev = crtc_state->base.crtc->dev;
const struct drm_i915_private *dev_priv = to_i915(dev);
u16 trans_min, trans_y_tile_min;
 const u16 trans_amount = 10; /* This is a configurable amount */
@@ -5092,13 +5078,12 @@ static int icl_build_plane_wm(struct intel_crtc_state *crtc_state,
return 0;
}
-static int skl_build_pipe_wm(struct intel_crtc_state *cstate)
+static int skl_build_pipe_wm(struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(cstate->base.crtc->dev);
- struct skl_pipe_wm *pipe_wm = &cstate->wm.skl.optimal;
- struct drm_crtc_state *crtc_state = &cstate->base;
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+ struct skl_pipe_wm *pipe_wm = &crtc_state->wm.skl.optimal;
struct drm_plane *plane;
- const struct drm_plane_state *pstate;
+ const struct drm_plane_state *drm_plane_state;
int ret;
/*
@@ -5107,19 +5092,20 @@ static int skl_build_pipe_wm(struct intel_crtc_state *cstate)
*/
memset(pipe_wm->planes, 0, sizeof(pipe_wm->planes));
- drm_atomic_crtc_state_for_each_plane_state(plane, pstate, crtc_state) {
- const struct intel_plane_state *intel_pstate =
- to_intel_plane_state(pstate);
+ drm_atomic_crtc_state_for_each_plane_state(plane, drm_plane_state,
+ &crtc_state->base) {
+ const struct intel_plane_state *plane_state =
+ to_intel_plane_state(drm_plane_state);
if (INTEL_GEN(dev_priv) >= 11)
- ret = icl_build_plane_wm(cstate, intel_pstate);
+ ret = icl_build_plane_wm(crtc_state, plane_state);
else
- ret = skl_build_plane_wm(cstate, intel_pstate);
+ ret = skl_build_plane_wm(crtc_state, plane_state);
if (ret)
return ret;
}
- pipe_wm->linetime = skl_compute_linetime_wm(cstate);
+ pipe_wm->linetime = skl_compute_linetime_wm(crtc_state);
return 0;
}
@@ -5273,10 +5259,10 @@ static u32
pipes_modified(struct intel_atomic_state *state)
{
struct intel_crtc *crtc;
- struct intel_crtc_state *cstate;
+ struct intel_crtc_state *crtc_state;
u32 i, ret = 0;
- for_each_new_intel_crtc_in_state(state, crtc, cstate, i)
+ for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i)
ret |= drm_crtc_mask(&crtc->base);
return ret;
@@ -5652,11 +5638,11 @@ skl_compute_wm(struct intel_atomic_state *state)
}
static void skl_atomic_update_crtc_wm(struct intel_atomic_state *state,
- struct intel_crtc_state *cstate)
+ struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(cstate->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
- struct skl_pipe_wm *pipe_wm = &cstate->wm.skl.optimal;
+ struct skl_pipe_wm *pipe_wm = &crtc_state->wm.skl.optimal;
enum pipe pipe = crtc->pipe;
if (!(state->wm_results.dirty_pipes & drm_crtc_mask(&crtc->base)))
@@ -5666,9 +5652,9 @@ static void skl_atomic_update_crtc_wm(struct intel_atomic_state *state,
}
static void skl_initial_wm(struct intel_atomic_state *state,
- struct intel_crtc_state *cstate)
+ struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_device *dev = intel_crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct skl_ddb_values *results = &state->wm_results;
@@ -5678,8 +5664,8 @@ static void skl_initial_wm(struct intel_atomic_state *state,
mutex_lock(&dev_priv->wm.wm_mutex);
- if (cstate->base.active_changed)
- skl_atomic_update_crtc_wm(state, cstate);
+ if (crtc_state->base.active_changed)
+ skl_atomic_update_crtc_wm(state, crtc_state);
mutex_unlock(&dev_priv->wm.wm_mutex);
}
@@ -5735,28 +5721,29 @@ static void ilk_program_watermarks(struct drm_i915_private *dev_priv)
}
static void ilk_initial_watermarks(struct intel_atomic_state *state,
- struct intel_crtc_state *cstate)
+ struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(cstate->base.crtc->dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
mutex_lock(&dev_priv->wm.wm_mutex);
- intel_crtc->wm.active.ilk = cstate->wm.ilk.intermediate;
+ crtc->wm.active.ilk = crtc_state->wm.ilk.intermediate;
ilk_program_watermarks(dev_priv);
mutex_unlock(&dev_priv->wm.wm_mutex);
}
static void ilk_optimize_watermarks(struct intel_atomic_state *state,
- struct intel_crtc_state *cstate)
+ struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(cstate->base.crtc->dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+
+ if (!crtc_state->wm.need_postvbl_update)
+ return;
mutex_lock(&dev_priv->wm.wm_mutex);
- if (cstate->wm.need_postvbl_update) {
- intel_crtc->wm.active.ilk = cstate->wm.ilk.optimal;
- ilk_program_watermarks(dev_priv);
- }
+ crtc->wm.active.ilk = crtc_state->wm.ilk.optimal;
+ ilk_program_watermarks(dev_priv);
mutex_unlock(&dev_priv->wm.wm_mutex);
}
@@ -5812,13 +5799,13 @@ void skl_wm_get_hw_state(struct drm_i915_private *dev_priv)
struct skl_ddb_values *hw = &dev_priv->wm.skl_hw;
struct skl_ddb_allocation *ddb = &dev_priv->wm.skl_hw.ddb;
struct intel_crtc *crtc;
- struct intel_crtc_state *cstate;
+ struct intel_crtc_state *crtc_state;
skl_ddb_get_hw_state(dev_priv, ddb);
for_each_intel_crtc(&dev_priv->drm, crtc) {
- cstate = to_intel_crtc_state(crtc->base.state);
+ crtc_state = to_intel_crtc_state(crtc->base.state);
- skl_pipe_wm_get_hw_state(crtc, &cstate->wm.skl.optimal);
+ skl_pipe_wm_get_hw_state(crtc, &crtc_state->wm.skl.optimal);
if (crtc->active)
hw->dirty_pipes |= drm_crtc_mask(&crtc->base);
@@ -5835,8 +5822,8 @@ static void ilk_pipe_wm_get_hw_state(struct intel_crtc *crtc)
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct ilk_wm_values *hw = &dev_priv->wm.hw;
- struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->base.state);
- struct intel_pipe_wm *active = &cstate->wm.ilk.optimal;
+ struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state);
+ struct intel_pipe_wm *active = &crtc_state->wm.ilk.optimal;
enum pipe pipe = crtc->pipe;
static const i915_reg_t wm0_pipe_reg[] = {
[PIPE_A] = WM0_PIPEA_ILK,
@@ -6891,9 +6878,10 @@ void gen6_rps_boost(struct i915_request *rq)
/* Serializes with i915_request_retire() */
boost = false;
spin_lock_irqsave(&rq->lock, flags);
- if (!rq->waitboost && !dma_fence_is_signaled_locked(&rq->fence)) {
+ if (!i915_request_has_waitboost(rq) &&
+ !dma_fence_is_signaled_locked(&rq->fence)) {
boost = !atomic_fetch_inc(&rps->num_waiters);
- rq->waitboost = true;
+ rq->flags |= I915_REQUEST_WAITBOOST;
}
spin_unlock_irqrestore(&rq->lock, flags);
if (!boost)
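
The hunk above folds the request's dedicated waitboost bool into a flags word queried through a helper, leaving room for further per-request flags. A generic sketch of that pattern (names illustrative, not the i915 API):

#include <stdbool.h>
#include <stdio.h>

#define REQUEST_WAITBOOST (1u << 0) /* room for more flags later */

struct request {
	unsigned int flags;
};

static bool request_has_waitboost(const struct request *rq)
{
	return rq->flags & REQUEST_WAITBOOST;
}

int main(void)
{
	struct request rq = { .flags = 0 };

	if (!request_has_waitboost(&rq))
		rq.flags |= REQUEST_WAITBOOST; /* boost granted once */
	printf("waitboost: %d\n", request_has_waitboost(&rq));
	return 0;
}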
@@ -7175,7 +7163,7 @@ static void gen11_enable_rc6(struct drm_i915_private *dev_priv)
for_each_engine(engine, dev_priv, id)
I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
- if (HAS_GUC(dev_priv))
+ if (HAS_GT_UC(dev_priv))
I915_WRITE(GUC_MAX_IDLE_COUNT, 0xA);
I915_WRITE(GEN6_RC_SLEEP, 0);
@@ -7192,7 +7180,7 @@ static void gen11_enable_rc6(struct drm_i915_private *dev_priv)
* next request to execute. If the idle hysteresis is less than that
* interrupt service latency, the hardware will automatically gate
* the power well and we will then incur the wake up cost on top of
- * the service latency. A similar guide from intel_pstate is that we
+ * the service latency. A similar guide from the intel_pstate driver is that we
 * do not want the enable hysteresis to be less than the wakeup latency.
*
* igt/gem_exec_nop/sequential provides a rough estimate for the
@@ -7256,7 +7244,7 @@ static void gen9_enable_rc6(struct drm_i915_private *dev_priv)
for_each_engine(engine, dev_priv, id)
I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
- if (HAS_GUC(dev_priv))
+ if (HAS_GT_UC(dev_priv))
I915_WRITE(GUC_MAX_IDLE_COUNT, 0xA);
I915_WRITE(GEN6_RC_SLEEP, 0);
@@ -7271,7 +7259,7 @@ static void gen9_enable_rc6(struct drm_i915_private *dev_priv)
* next request to execute. If the idle hysteresis is less than that
* interrupt service latency, the hardware will automatically gate
* the power well and we will then incur the wake up cost on top of
- * the service latency. A similar guide from intel_pstate is that we
+ * the service latency. A similar guide from the intel_pstate driver is that we
 * do not want the enable hysteresis to be less than the wakeup latency.
*
* igt/gem_exec_nop/sequential provides a rough estimate for the
@@ -9181,9 +9169,6 @@ static void skl_init_clock_gating(struct drm_i915_private *dev_priv)
static void bdw_init_clock_gating(struct drm_i915_private *dev_priv)
{
- /* The GTT cache must be disabled if the system is using 2M pages. */
- bool can_use_gtt_cache = !HAS_PAGE_SIZES(dev_priv,
- I915_GTT_PAGE_SIZE_2M);
enum pipe pipe;
/* WaSwitchSolVfFArbitrationPriority:bdw */
@@ -9216,9 +9201,6 @@ static void bdw_init_clock_gating(struct drm_i915_private *dev_priv)
/* WaProgramL3SqcReg1Default:bdw */
gen8_set_l3sqc_credits(dev_priv, 30, 2);
- /* WaGttCachingOffByDefault:bdw */
- I915_WRITE(HSW_GTT_CACHE_EN, can_use_gtt_cache ? GTT_CACHE_EN_ALL : 0);
-
/* WaKVMNotificationOnConfigChange:bdw */
I915_WRITE(CHICKEN_PAR2_1, I915_READ(CHICKEN_PAR2_1)
| KVM_CONFIG_CHANGE_NOTIFICATION_SELECT);
@@ -9483,12 +9465,6 @@ static void chv_init_clock_gating(struct drm_i915_private *dev_priv)
* LSQC Setting Recommendations.
*/
gen8_set_l3sqc_credits(dev_priv, 38, 2);
-
- /*
- * GTT cache may not work with big pages, so if those
- * are ever enabled GTT cache may need to be disabled.
- */
- I915_WRITE(HSW_GTT_CACHE_EN, GTT_CACHE_EN_ALL);
}
static void g4x_init_clock_gating(struct drm_i915_private *dev_priv)
@@ -9621,7 +9597,9 @@ static void nop_init_clock_gating(struct drm_i915_private *dev_priv)
*/
void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv)
{
- if (IS_GEN(dev_priv, 11))
+ if (IS_GEN(dev_priv, 12))
+ dev_priv->display.init_clock_gating = nop_init_clock_gating;
+ else if (IS_GEN(dev_priv, 11))
dev_priv->display.init_clock_gating = icl_init_clock_gating;
else if (IS_CANNONLAKE(dev_priv))
dev_priv->display.init_clock_gating = cnl_init_clock_gating;
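
The hunk above extends the init_clock_gating dispatch to gen12 with a no-op hook, presumably because no gen12 clock-gating workarounds exist yet. A condensed sketch of the select-by-generation pattern (generations and hooks abridged):

#include <stdio.h>

struct priv {
	int gen;
	void (*init_clock_gating)(struct priv *);
};

static void nop_init_clock_gating(struct priv *p)
{
	(void)p; /* nothing to do */
}

static void icl_init_clock_gating(struct priv *p)
{
	(void)p;
	printf("applying icl clock gating workarounds\n");
}

static void init_clock_gating_hooks(struct priv *p)
{
	if (p->gen == 12)
		p->init_clock_gating = nop_init_clock_gating;
	else if (p->gen == 11)
		p->init_clock_gating = icl_init_clock_gating;
	else
		p->init_clock_gating = nop_init_clock_gating;
}

int main(void)
{
	struct priv p = { .gen = 12 };

	init_clock_gating_hooks(&p);
	p.init_clock_gating(&p); /* safe: a hook is always installed */
	return 0;
}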
diff --git a/drivers/gpu/drm/i915/intel_pm.h b/drivers/gpu/drm/i915/intel_pm.h
index 1b489fa399e1..e3573e1e16e3 100644
--- a/drivers/gpu/drm/i915/intel_pm.h
+++ b/drivers/gpu/drm/i915/intel_pm.h
@@ -10,10 +10,10 @@
#include "i915_reg.h"
-struct drm_atomic_state;
struct drm_device;
struct drm_i915_private;
struct i915_request;
+struct intel_atomic_state;
struct intel_crtc;
struct intel_crtc_state;
struct intel_plane;
@@ -52,7 +52,7 @@ void skl_pipe_wm_get_hw_state(struct intel_crtc *crtc,
struct skl_pipe_wm *out);
void g4x_wm_sanitize(struct drm_i915_private *dev_priv);
void vlv_wm_sanitize(struct drm_i915_private *dev_priv);
-bool intel_can_enable_sagv(struct drm_atomic_state *state);
+bool intel_can_enable_sagv(struct intel_atomic_state *state);
int intel_enable_sagv(struct drm_i915_private *dev_priv);
int intel_disable_sagv(struct drm_i915_private *dev_priv);
bool skl_wm_level_equals(const struct skl_wm_level *l1,
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index 8d1aebc3e857..2fd3c097e1f5 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -32,6 +32,7 @@
#include <drm/drm_print.h>
#include "i915_drv.h"
+#include "i915_trace.h"
/**
* DOC: runtime pm
@@ -592,7 +593,7 @@ void intel_runtime_pm_disable(struct intel_runtime_pm *rpm)
pm_runtime_put(kdev);
}
-void intel_runtime_pm_cleanup(struct intel_runtime_pm *rpm)
+void intel_runtime_pm_driver_release(struct intel_runtime_pm *rpm)
{
int count = atomic_read(&rpm->wakeref_count);
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.h b/drivers/gpu/drm/i915/intel_runtime_pm.h
index 2ee8f9522e05..ae64ff14c642 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.h
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.h
@@ -173,7 +173,7 @@ enable_rpm_wakeref_asserts(struct intel_runtime_pm *rpm)
void intel_runtime_pm_init_early(struct intel_runtime_pm *rpm);
void intel_runtime_pm_enable(struct intel_runtime_pm *rpm);
void intel_runtime_pm_disable(struct intel_runtime_pm *rpm);
-void intel_runtime_pm_cleanup(struct intel_runtime_pm *rpm);
+void intel_runtime_pm_driver_release(struct intel_runtime_pm *rpm);
intel_wakeref_t intel_runtime_pm_get(struct intel_runtime_pm *rpm);
intel_wakeref_t intel_runtime_pm_get_if_in_use(struct intel_runtime_pm *rpm);
diff --git a/drivers/gpu/drm/i915/intel_sideband.c b/drivers/gpu/drm/i915/intel_sideband.c
index a115625e980c..e06b35b844a0 100644
--- a/drivers/gpu/drm/i915/intel_sideband.c
+++ b/drivers/gpu/drm/i915/intel_sideband.c
@@ -24,10 +24,8 @@
#include <asm/iosf_mbi.h>
-#include "intel_sideband.h"
-
#include "i915_drv.h"
-#include "intel_drv.h"
+#include "intel_sideband.h"
/*
* IOSF sideband, see VLV2_SidebandMsg_HAS.docx and
diff --git a/drivers/gpu/drm/i915/intel_uc.c b/drivers/gpu/drm/i915/intel_uc.c
deleted file mode 100644
index ae45651ac73c..000000000000
--- a/drivers/gpu/drm/i915/intel_uc.c
+++ /dev/null
@@ -1,561 +0,0 @@
-/*
- * Copyright © 2016 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
- */
-
-#include "gt/intel_reset.h"
-#include "intel_uc.h"
-#include "intel_guc.h"
-#include "intel_guc_ads.h"
-#include "intel_guc_submission.h"
-#include "i915_drv.h"
-
-static void guc_free_load_err_log(struct intel_guc *guc);
-
-/* Reset GuC, providing us with fresh state for both GuC and HuC. */
-static int __intel_uc_reset_hw(struct drm_i915_private *dev_priv)
-{
- int ret;
- u32 guc_status;
-
- ret = intel_reset_guc(dev_priv);
- if (ret) {
- DRM_ERROR("Failed to reset GuC, ret = %d\n", ret);
- return ret;
- }
-
- guc_status = I915_READ(GUC_STATUS);
- WARN(!(guc_status & GS_MIA_IN_RESET),
- "GuC status: 0x%x, MIA core expected to be in reset\n",
- guc_status);
-
- return ret;
-}
-
-static int __get_platform_enable_guc(struct drm_i915_private *i915)
-{
- struct intel_uc_fw *guc_fw = &i915->guc.fw;
- struct intel_uc_fw *huc_fw = &i915->huc.fw;
- int enable_guc = 0;
-
- /* Default is to use HuC if we know GuC and HuC firmwares */
- if (intel_uc_fw_is_selected(guc_fw) && intel_uc_fw_is_selected(huc_fw))
- enable_guc |= ENABLE_GUC_LOAD_HUC;
-
- /* Any platform specific fine-tuning can be done here */
-
- return enable_guc;
-}
-
-static int __get_default_guc_log_level(struct drm_i915_private *i915)
-{
- int guc_log_level;
-
- if (!HAS_GUC(i915) || !intel_uc_is_using_guc(i915))
- guc_log_level = GUC_LOG_LEVEL_DISABLED;
- else if (IS_ENABLED(CONFIG_DRM_I915_DEBUG) ||
- IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
- guc_log_level = GUC_LOG_LEVEL_MAX;
- else
- guc_log_level = GUC_LOG_LEVEL_NON_VERBOSE;
-
- /* Any platform specific fine-tuning can be done here */
-
- return guc_log_level;
-}
-
-/**
- * sanitize_options_early - sanitize uC related modparam options
- * @i915: device private
- *
- * In case of "enable_guc" option this function will attempt to modify
- * it only if it was initially set to "auto(-1)". Default value for this
- * modparam varies between platforms and it is hardcoded in driver code.
- * Any other modparam value is only monitored against availability of the
- * related hardware or firmware definitions.
- *
- * In case of "guc_log_level" option this function will attempt to modify
- * it only if it was initially set to "auto(-1)" or if initial value was
- * "enable(1..4)" on platforms without the GuC. Default value for this
- * modparam varies between platforms and is usually set to "disable(0)"
- * unless GuC is enabled on given platform and the driver is compiled with
- * debug config when this modparam will default to "enable(1..4)".
- */
-static void sanitize_options_early(struct drm_i915_private *i915)
-{
- struct intel_uc_fw *guc_fw = &i915->guc.fw;
- struct intel_uc_fw *huc_fw = &i915->huc.fw;
-
- /* A negative value means "use platform default" */
- if (i915_modparams.enable_guc < 0)
- i915_modparams.enable_guc = __get_platform_enable_guc(i915);
-
- DRM_DEBUG_DRIVER("enable_guc=%d (submission:%s huc:%s)\n",
- i915_modparams.enable_guc,
- yesno(intel_uc_is_using_guc_submission(i915)),
- yesno(intel_uc_is_using_huc(i915)));
-
- /* Verify GuC firmware availability */
- if (intel_uc_is_using_guc(i915) && !intel_uc_fw_is_selected(guc_fw)) {
- DRM_WARN("Incompatible option detected: %s=%d, %s!\n",
- "enable_guc", i915_modparams.enable_guc,
- !HAS_GUC(i915) ? "no GuC hardware" :
- "no GuC firmware");
- }
-
- /* Verify HuC firmware availability */
- if (intel_uc_is_using_huc(i915) && !intel_uc_fw_is_selected(huc_fw)) {
- DRM_WARN("Incompatible option detected: %s=%d, %s!\n",
- "enable_guc", i915_modparams.enable_guc,
- !HAS_HUC(i915) ? "no HuC hardware" :
- "no HuC firmware");
- }
-
- /* XXX: GuC submission is unavailable for now */
- if (intel_uc_is_using_guc_submission(i915)) {
- DRM_INFO("Incompatible option detected: %s=%d, %s!\n",
- "enable_guc", i915_modparams.enable_guc,
- "GuC submission not supported");
- DRM_INFO("Switching to non-GuC submission mode!\n");
- i915_modparams.enable_guc &= ~ENABLE_GUC_SUBMISSION;
- }
-
- /* A negative value means "use platform/config default" */
- if (i915_modparams.guc_log_level < 0)
- i915_modparams.guc_log_level =
- __get_default_guc_log_level(i915);
-
- if (i915_modparams.guc_log_level > 0 && !intel_uc_is_using_guc(i915)) {
- DRM_WARN("Incompatible option detected: %s=%d, %s!\n",
- "guc_log_level", i915_modparams.guc_log_level,
- !HAS_GUC(i915) ? "no GuC hardware" :
- "GuC not enabled");
- i915_modparams.guc_log_level = 0;
- }
-
- if (i915_modparams.guc_log_level > GUC_LOG_LEVEL_MAX) {
- DRM_WARN("Incompatible option detected: %s=%d, %s!\n",
- "guc_log_level", i915_modparams.guc_log_level,
- "verbosity too high");
- i915_modparams.guc_log_level = GUC_LOG_LEVEL_MAX;
- }
-
- DRM_DEBUG_DRIVER("guc_log_level=%d (enabled:%s, verbose:%s, verbosity:%d)\n",
- i915_modparams.guc_log_level,
- yesno(i915_modparams.guc_log_level),
- yesno(GUC_LOG_LEVEL_IS_VERBOSE(i915_modparams.guc_log_level)),
- GUC_LOG_LEVEL_TO_VERBOSITY(i915_modparams.guc_log_level));
-
- /* Make sure that sanitization was done */
- GEM_BUG_ON(i915_modparams.enable_guc < 0);
- GEM_BUG_ON(i915_modparams.guc_log_level < 0);
-}
-
-void intel_uc_init_early(struct drm_i915_private *i915)
-{
- struct intel_guc *guc = &i915->guc;
- struct intel_huc *huc = &i915->huc;
-
- intel_guc_init_early(guc);
- intel_huc_init_early(huc);
-
- sanitize_options_early(i915);
-}
-
-void intel_uc_cleanup_early(struct drm_i915_private *i915)
-{
- struct intel_guc *guc = &i915->guc;
-
- guc_free_load_err_log(guc);
-}
-
-/**
- * intel_uc_init_mmio - setup uC MMIO access
- * @i915: device private
- *
- * Setup minimal state necessary for MMIO accesses later in the
- * initialization sequence.
- */
-void intel_uc_init_mmio(struct drm_i915_private *i915)
-{
- intel_guc_init_send_regs(&i915->guc);
-}
-
-static void guc_capture_load_err_log(struct intel_guc *guc)
-{
- if (!guc->log.vma || !intel_guc_log_get_level(&guc->log))
- return;
-
- if (!guc->load_err_log)
- guc->load_err_log = i915_gem_object_get(guc->log.vma->obj);
-}
-
-static void guc_free_load_err_log(struct intel_guc *guc)
-{
- if (guc->load_err_log)
- i915_gem_object_put(guc->load_err_log);
-}
-
-static void guc_reset_interrupts(struct intel_guc *guc)
-{
- guc->interrupts.reset(guc_to_i915(guc));
-}
-
-static void guc_enable_interrupts(struct intel_guc *guc)
-{
- guc->interrupts.enable(guc_to_i915(guc));
-}
-
-static void guc_disable_interrupts(struct intel_guc *guc)
-{
- guc->interrupts.disable(guc_to_i915(guc));
-}
-
-static int guc_enable_communication(struct intel_guc *guc)
-{
- guc_enable_interrupts(guc);
-
- return intel_guc_ct_enable(&guc->ct);
-}
-
-static void guc_stop_communication(struct intel_guc *guc)
-{
- intel_guc_ct_stop(&guc->ct);
-
- guc->send = intel_guc_send_nop;
- guc->handler = intel_guc_to_host_event_handler_nop;
-}
-
-static void guc_disable_communication(struct intel_guc *guc)
-{
- intel_guc_ct_disable(&guc->ct);
-
- guc_disable_interrupts(guc);
-
- guc->send = intel_guc_send_nop;
- guc->handler = intel_guc_to_host_event_handler_nop;
-}
-
-int intel_uc_init_misc(struct drm_i915_private *i915)
-{
- struct intel_guc *guc = &i915->guc;
- struct intel_huc *huc = &i915->huc;
- int ret;
-
- if (!USES_GUC(i915))
- return 0;
-
- ret = intel_guc_init_misc(guc);
- if (ret)
- return ret;
-
- if (USES_HUC(i915)) {
- ret = intel_huc_init_misc(huc);
- if (ret)
- goto err_guc;
- }
-
- return 0;
-
-err_guc:
- intel_guc_fini_misc(guc);
- return ret;
-}
-
-void intel_uc_fini_misc(struct drm_i915_private *i915)
-{
- struct intel_guc *guc = &i915->guc;
- struct intel_huc *huc = &i915->huc;
-
- if (!USES_GUC(i915))
- return;
-
- if (USES_HUC(i915))
- intel_huc_fini_misc(huc);
-
- intel_guc_fini_misc(guc);
-}
-
-int intel_uc_init(struct drm_i915_private *i915)
-{
- struct intel_guc *guc = &i915->guc;
- struct intel_huc *huc = &i915->huc;
- int ret;
-
- if (!USES_GUC(i915))
- return 0;
-
- if (!HAS_GUC(i915))
- return -ENODEV;
-
- /* XXX: GuC submission is unavailable for now */
- GEM_BUG_ON(USES_GUC_SUBMISSION(i915));
-
- ret = intel_guc_init(guc);
- if (ret)
- return ret;
-
- if (USES_HUC(i915)) {
- ret = intel_huc_init(huc);
- if (ret)
- goto err_guc;
- }
-
- if (USES_GUC_SUBMISSION(i915)) {
- /*
- * This is stuff we need to have available at fw load time
- * if we are planning to enable submission later
- */
- ret = intel_guc_submission_init(guc);
- if (ret)
- goto err_huc;
- }
-
- return 0;
-
-err_huc:
- if (USES_HUC(i915))
- intel_huc_fini(huc);
-err_guc:
- intel_guc_fini(guc);
- return ret;
-}
-
-void intel_uc_fini(struct drm_i915_private *i915)
-{
- struct intel_guc *guc = &i915->guc;
-
- if (!USES_GUC(i915))
- return;
-
- GEM_BUG_ON(!HAS_GUC(i915));
-
- if (USES_GUC_SUBMISSION(i915))
- intel_guc_submission_fini(guc);
-
- if (USES_HUC(i915))
- intel_huc_fini(&i915->huc);
-
- intel_guc_fini(guc);
-}
-
-static void __uc_sanitize(struct drm_i915_private *i915)
-{
- struct intel_guc *guc = &i915->guc;
- struct intel_huc *huc = &i915->huc;
-
- GEM_BUG_ON(!HAS_GUC(i915));
-
- intel_huc_sanitize(huc);
- intel_guc_sanitize(guc);
-
- __intel_uc_reset_hw(i915);
-}
-
-void intel_uc_sanitize(struct drm_i915_private *i915)
-{
- if (!USES_GUC(i915))
- return;
-
- __uc_sanitize(i915);
-}
-
-int intel_uc_init_hw(struct drm_i915_private *i915)
-{
- struct intel_guc *guc = &i915->guc;
- struct intel_huc *huc = &i915->huc;
- int ret, attempts;
-
- if (!USES_GUC(i915))
- return 0;
-
- GEM_BUG_ON(!HAS_GUC(i915));
-
- guc_reset_interrupts(guc);
-
- /* WaEnableuKernelHeaderValidFix:skl */
- /* WaEnableGuCBootHashCheckNotSet:skl,bxt,kbl */
- if (IS_GEN(i915, 9))
- attempts = 3;
- else
- attempts = 1;
-
- while (attempts--) {
- /*
- * Always reset the GuC just before (re)loading, so
- * that the state and timing are fairly predictable
- */
- ret = __intel_uc_reset_hw(i915);
- if (ret)
- goto err_out;
-
- if (USES_HUC(i915)) {
- ret = intel_huc_fw_upload(huc);
- if (ret)
- goto err_out;
- }
-
- intel_guc_ads_reset(guc);
- intel_guc_init_params(guc);
- ret = intel_guc_fw_upload(guc);
- if (ret == 0)
- break;
-
- DRM_DEBUG_DRIVER("GuC fw load failed: %d; will reset and "
- "retry %d more time(s)\n", ret, attempts);
- }
-
- /* Did we succeed or run out of retries? */
- if (ret)
- goto err_log_capture;
-
- ret = guc_enable_communication(guc);
- if (ret)
- goto err_log_capture;
-
- if (USES_HUC(i915)) {
- ret = intel_huc_auth(huc);
- if (ret)
- goto err_communication;
- }
-
- ret = intel_guc_sample_forcewake(guc);
- if (ret)
- goto err_communication;
-
- if (USES_GUC_SUBMISSION(i915)) {
- ret = intel_guc_submission_enable(guc);
- if (ret)
- goto err_communication;
- }
-
- dev_info(i915->drm.dev, "GuC firmware version %u.%u\n",
- guc->fw.major_ver_found, guc->fw.minor_ver_found);
- dev_info(i915->drm.dev, "GuC submission %s\n",
- enableddisabled(USES_GUC_SUBMISSION(i915)));
- dev_info(i915->drm.dev, "HuC %s\n",
- enableddisabled(USES_HUC(i915)));
-
- return 0;
-
- /*
- * We've failed to load the firmware :(
- */
-err_communication:
- guc_disable_communication(guc);
-err_log_capture:
- guc_capture_load_err_log(guc);
-err_out:
- __uc_sanitize(i915);
-
- /*
- * Note that there is no fallback as either user explicitly asked for
- * the GuC or driver default option was to run with the GuC enabled.
- */
- if (GEM_WARN_ON(ret == -EIO))
- ret = -EINVAL;
-
- dev_err(i915->drm.dev, "GuC initialization failed %d\n", ret);
- return ret;
-}
-
-void intel_uc_fini_hw(struct drm_i915_private *i915)
-{
- struct intel_guc *guc = &i915->guc;
-
- if (!intel_guc_is_loaded(guc))
- return;
-
- GEM_BUG_ON(!HAS_GUC(i915));
-
- if (USES_GUC_SUBMISSION(i915))
- intel_guc_submission_disable(guc);
-
- guc_disable_communication(guc);
- __uc_sanitize(i915);
-}
-
-/**
- * intel_uc_reset_prepare - Prepare for reset
- * @i915: device private
- *
- * Prepare for a full GPU reset.
- */
-void intel_uc_reset_prepare(struct drm_i915_private *i915)
-{
- struct intel_guc *guc = &i915->guc;
-
- if (!intel_guc_is_loaded(guc))
- return;
-
- guc_stop_communication(guc);
- __uc_sanitize(i915);
-}
-
-void intel_uc_runtime_suspend(struct drm_i915_private *i915)
-{
- struct intel_guc *guc = &i915->guc;
- int err;
-
- if (!intel_guc_is_loaded(guc))
- return;
-
- err = intel_guc_suspend(guc);
- if (err)
- DRM_DEBUG_DRIVER("Failed to suspend GuC, err=%d", err);
-
- guc_disable_communication(guc);
-}
-
-void intel_uc_suspend(struct drm_i915_private *i915)
-{
- struct intel_guc *guc = &i915->guc;
- intel_wakeref_t wakeref;
-
- if (!intel_guc_is_loaded(guc))
- return;
-
- with_intel_runtime_pm(&i915->runtime_pm, wakeref)
- intel_uc_runtime_suspend(i915);
-}
-
-int intel_uc_resume(struct drm_i915_private *i915)
-{
- struct intel_guc *guc = &i915->guc;
- int err;
-
- if (!intel_guc_is_loaded(guc))
- return 0;
-
- guc_enable_communication(guc);
-
- err = intel_guc_resume(guc);
- if (err) {
- DRM_DEBUG_DRIVER("Failed to resume GuC, err=%d", err);
- return err;
- }
-
- return 0;
-}
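/*
 * Illustrative sketch (not part of the patch): the reset-and-retry loop
 * from intel_uc_init_hw() in the file removed above, reduced to standalone
 * C. Gen9 parts get three attempts (WaEnableGuCBootHashCheckNotSet),
 * everything else one, and the hardware is reset before each attempt so
 * the load starts from a predictable state. try_upload() and is_gen9 are
 * stand-ins for the real upload path.
 */
#include <stdio.h>

static int try_upload(int attempt)
{
	/* Pretend the first attempt fails and the second succeeds. */
	return attempt == 0 ? -1 : 0;
}

static int load_with_retries(int is_gen9)
{
	int attempts = is_gen9 ? 3 : 1;
	int tries = 0;
	int ret = -1;

	while (attempts--) {
		/* A real driver would reset the GuC here before reloading. */
		ret = try_upload(tries++);
		if (ret == 0)
			break;
		printf("load failed (%d); %d retry(ies) left\n", ret, attempts);
	}
	return ret;
}

int main(void)
{
	return load_with_retries(1);
}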
diff --git a/drivers/gpu/drm/i915/intel_uc.h b/drivers/gpu/drm/i915/intel_uc.h
deleted file mode 100644
index 3ea06c87dfcd..000000000000
--- a/drivers/gpu/drm/i915/intel_uc.h
+++ /dev/null
@@ -1,64 +0,0 @@
-/*
- * Copyright © 2014 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
- */
-#ifndef _INTEL_UC_H_
-#define _INTEL_UC_H_
-
-#include "intel_guc.h"
-#include "intel_huc.h"
-#include "i915_params.h"
-
-void intel_uc_init_early(struct drm_i915_private *dev_priv);
-void intel_uc_cleanup_early(struct drm_i915_private *dev_priv);
-void intel_uc_init_mmio(struct drm_i915_private *dev_priv);
-int intel_uc_init_misc(struct drm_i915_private *dev_priv);
-void intel_uc_fini_misc(struct drm_i915_private *dev_priv);
-void intel_uc_sanitize(struct drm_i915_private *dev_priv);
-int intel_uc_init_hw(struct drm_i915_private *dev_priv);
-void intel_uc_fini_hw(struct drm_i915_private *dev_priv);
-int intel_uc_init(struct drm_i915_private *dev_priv);
-void intel_uc_fini(struct drm_i915_private *dev_priv);
-void intel_uc_reset_prepare(struct drm_i915_private *i915);
-void intel_uc_suspend(struct drm_i915_private *i915);
-void intel_uc_runtime_suspend(struct drm_i915_private *i915);
-int intel_uc_resume(struct drm_i915_private *dev_priv);
-
-static inline bool intel_uc_is_using_guc(struct drm_i915_private *i915)
-{
- GEM_BUG_ON(i915_modparams.enable_guc < 0);
- return i915_modparams.enable_guc > 0;
-}
-
-static inline bool intel_uc_is_using_guc_submission(struct drm_i915_private *i915)
-{
- GEM_BUG_ON(i915_modparams.enable_guc < 0);
- return i915_modparams.enable_guc & ENABLE_GUC_SUBMISSION;
-}
-
-static inline bool intel_uc_is_using_huc(struct drm_i915_private *i915)
-{
- GEM_BUG_ON(i915_modparams.enable_guc < 0);
- return i915_modparams.enable_guc & ENABLE_GUC_LOAD_HUC;
-}
-
-#endif
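/*
 * Illustrative sketch (not part of the patch): the enable_guc modparam
 * treated as a bitmask, as in the inline helpers in the header removed
 * above. The bit values below are assumptions for the demo; the real
 * masks live in i915_params.h.
 */
#include <stdbool.h>
#include <stdio.h>

#define ENABLE_GUC_SUBMISSION	(1 << 0)	/* assumed bit layout */
#define ENABLE_GUC_LOAD_HUC	(1 << 1)

static bool using_guc(int enable_guc)
{
	return enable_guc > 0;	/* any bit set means the GuC is in use */
}

static bool using_guc_submission(int enable_guc)
{
	return enable_guc & ENABLE_GUC_SUBMISSION;
}

static bool using_huc(int enable_guc)
{
	return enable_guc & ENABLE_GUC_LOAD_HUC;
}

int main(void)
{
	int enable_guc = ENABLE_GUC_LOAD_HUC;	/* the "HuC only" default */

	printf("guc:%d submission:%d huc:%d\n",
	       using_guc(enable_guc),
	       using_guc_submission(enable_guc),
	       using_huc(enable_guc));
	return 0;
}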
diff --git a/drivers/gpu/drm/i915/intel_uc_fw.c b/drivers/gpu/drm/i915/intel_uc_fw.c
deleted file mode 100644
index f342ddd47df8..000000000000
--- a/drivers/gpu/drm/i915/intel_uc_fw.c
+++ /dev/null
@@ -1,357 +0,0 @@
-/*
- * Copyright © 2016-2017 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
- */
-
-#include <linux/bitfield.h>
-#include <linux/firmware.h>
-#include <drm/drm_print.h>
-
-#include "intel_uc_fw.h"
-#include "i915_drv.h"
-
-/**
- * intel_uc_fw_fetch - fetch uC firmware
- *
- * @dev_priv: device private
- * @uc_fw: uC firmware
- *
- * Fetch uC firmware into GEM obj.
- */
-void intel_uc_fw_fetch(struct drm_i915_private *dev_priv,
- struct intel_uc_fw *uc_fw)
-{
- struct pci_dev *pdev = dev_priv->drm.pdev;
- struct drm_i915_gem_object *obj;
- const struct firmware *fw = NULL;
- struct uc_css_header *css;
- size_t size;
- int err;
-
- if (!uc_fw->path) {
- dev_info(dev_priv->drm.dev,
- "%s: No firmware was defined for %s!\n",
- intel_uc_fw_type_repr(uc_fw->type),
- intel_platform_name(INTEL_INFO(dev_priv)->platform));
- return;
- }
-
- DRM_DEBUG_DRIVER("%s fw fetch %s\n",
- intel_uc_fw_type_repr(uc_fw->type), uc_fw->path);
-
- uc_fw->fetch_status = INTEL_UC_FIRMWARE_PENDING;
- DRM_DEBUG_DRIVER("%s fw fetch %s\n",
- intel_uc_fw_type_repr(uc_fw->type),
- intel_uc_fw_status_repr(uc_fw->fetch_status));
-
- err = request_firmware(&fw, uc_fw->path, &pdev->dev);
- if (err) {
- DRM_DEBUG_DRIVER("%s fw request_firmware err=%d\n",
- intel_uc_fw_type_repr(uc_fw->type), err);
- goto fail;
- }
-
- DRM_DEBUG_DRIVER("%s fw size %zu ptr %p\n",
- intel_uc_fw_type_repr(uc_fw->type), fw->size, fw);
-
- /* Check the size of the blob before examining buffer contents */
- if (fw->size < sizeof(struct uc_css_header)) {
- DRM_WARN("%s: Unexpected firmware size (%zu, min %zu)\n",
- intel_uc_fw_type_repr(uc_fw->type),
- fw->size, sizeof(struct uc_css_header));
- err = -ENODATA;
- goto fail;
- }
-
- css = (struct uc_css_header *)fw->data;
-
- /* Firmware bits always start from header */
- uc_fw->header_offset = 0;
- uc_fw->header_size = (css->header_size_dw - css->modulus_size_dw -
- css->key_size_dw - css->exponent_size_dw) *
- sizeof(u32);
-
- if (uc_fw->header_size != sizeof(struct uc_css_header)) {
- DRM_WARN("%s: Mismatched firmware header definition\n",
- intel_uc_fw_type_repr(uc_fw->type));
- err = -ENOEXEC;
- goto fail;
- }
-
- /* then, uCode */
- uc_fw->ucode_offset = uc_fw->header_offset + uc_fw->header_size;
- uc_fw->ucode_size = (css->size_dw - css->header_size_dw) * sizeof(u32);
-
- /* now RSA */
- if (css->key_size_dw != UOS_RSA_SCRATCH_COUNT) {
- DRM_WARN("%s: Mismatched firmware RSA key size (%u)\n",
- intel_uc_fw_type_repr(uc_fw->type), css->key_size_dw);
- err = -ENOEXEC;
- goto fail;
- }
- uc_fw->rsa_offset = uc_fw->ucode_offset + uc_fw->ucode_size;
- uc_fw->rsa_size = css->key_size_dw * sizeof(u32);
-
- /* The blob must at least hold the header, uCode and RSA; check their combined size */
- size = uc_fw->header_size + uc_fw->ucode_size + uc_fw->rsa_size;
- if (fw->size < size) {
- DRM_WARN("%s: Truncated firmware (%zu, expected %zu)\n",
- intel_uc_fw_type_repr(uc_fw->type), fw->size, size);
- err = -ENOEXEC;
- goto fail;
- }
-
- /* Get version numbers from the CSS header */
- switch (uc_fw->type) {
- case INTEL_UC_FW_TYPE_GUC:
- uc_fw->major_ver_found = FIELD_GET(CSS_SW_VERSION_GUC_MAJOR,
- css->sw_version);
- uc_fw->minor_ver_found = FIELD_GET(CSS_SW_VERSION_GUC_MINOR,
- css->sw_version);
- break;
-
- case INTEL_UC_FW_TYPE_HUC:
- uc_fw->major_ver_found = FIELD_GET(CSS_SW_VERSION_HUC_MAJOR,
- css->sw_version);
- uc_fw->minor_ver_found = FIELD_GET(CSS_SW_VERSION_HUC_MINOR,
- css->sw_version);
- break;
-
- default:
- MISSING_CASE(uc_fw->type);
- break;
- }
-
- DRM_DEBUG_DRIVER("%s fw version %u.%u (wanted %u.%u)\n",
- intel_uc_fw_type_repr(uc_fw->type),
- uc_fw->major_ver_found, uc_fw->minor_ver_found,
- uc_fw->major_ver_wanted, uc_fw->minor_ver_wanted);
-
- if (uc_fw->major_ver_wanted == 0 && uc_fw->minor_ver_wanted == 0) {
- DRM_NOTE("%s: Skipping firmware version check\n",
- intel_uc_fw_type_repr(uc_fw->type));
- } else if (uc_fw->major_ver_found != uc_fw->major_ver_wanted ||
- uc_fw->minor_ver_found < uc_fw->minor_ver_wanted) {
- DRM_NOTE("%s: Wrong firmware version (%u.%u, required %u.%u)\n",
- intel_uc_fw_type_repr(uc_fw->type),
- uc_fw->major_ver_found, uc_fw->minor_ver_found,
- uc_fw->major_ver_wanted, uc_fw->minor_ver_wanted);
- err = -ENOEXEC;
- goto fail;
- }
-
- obj = i915_gem_object_create_shmem_from_data(dev_priv,
- fw->data, fw->size);
- if (IS_ERR(obj)) {
- err = PTR_ERR(obj);
- DRM_DEBUG_DRIVER("%s fw object_create err=%d\n",
- intel_uc_fw_type_repr(uc_fw->type), err);
- goto fail;
- }
-
- uc_fw->obj = obj;
- uc_fw->size = fw->size;
- uc_fw->fetch_status = INTEL_UC_FIRMWARE_SUCCESS;
- DRM_DEBUG_DRIVER("%s fw fetch %s\n",
- intel_uc_fw_type_repr(uc_fw->type),
- intel_uc_fw_status_repr(uc_fw->fetch_status));
-
- release_firmware(fw);
- return;
-
-fail:
- uc_fw->fetch_status = INTEL_UC_FIRMWARE_FAIL;
- DRM_DEBUG_DRIVER("%s fw fetch %s\n",
- intel_uc_fw_type_repr(uc_fw->type),
- intel_uc_fw_status_repr(uc_fw->fetch_status));
-
- DRM_WARN("%s: Failed to fetch firmware %s (error %d)\n",
- intel_uc_fw_type_repr(uc_fw->type), uc_fw->path, err);
- DRM_INFO("%s: Firmware can be downloaded from %s\n",
- intel_uc_fw_type_repr(uc_fw->type), INTEL_UC_FIRMWARE_URL);
-
- release_firmware(fw); /* OK even if fw is NULL */
-}
-
-static void intel_uc_fw_ggtt_bind(struct intel_uc_fw *uc_fw)
-{
- struct drm_i915_gem_object *obj = uc_fw->obj;
- struct i915_ggtt *ggtt = &to_i915(obj->base.dev)->ggtt;
- struct i915_vma dummy = {
- .node.start = intel_uc_fw_ggtt_offset(uc_fw),
- .node.size = obj->base.size,
- .pages = obj->mm.pages,
- .vm = &ggtt->vm,
- };
-
- GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
- GEM_BUG_ON(dummy.node.size > ggtt->uc_fw.size);
-
- /* uc_fw->obj cache domains were not controlled across suspend */
- drm_clflush_sg(dummy.pages);
-
- ggtt->vm.insert_entries(&ggtt->vm, &dummy, I915_CACHE_NONE, 0);
-}
-
-static void intel_uc_fw_ggtt_unbind(struct intel_uc_fw *uc_fw)
-{
- struct drm_i915_gem_object *obj = uc_fw->obj;
- struct i915_ggtt *ggtt = &to_i915(obj->base.dev)->ggtt;
- u64 start = intel_uc_fw_ggtt_offset(uc_fw);
-
- ggtt->vm.clear_range(&ggtt->vm, start, obj->base.size);
-}
-
-/**
- * intel_uc_fw_upload - load uC firmware using custom loader
- * @uc_fw: uC firmware
- * @xfer: custom uC firmware loader function
- *
- * Loads uC firmware using custom loader and updates internal flags.
- *
- * Return: 0 on success, non-zero on failure.
- */
-int intel_uc_fw_upload(struct intel_uc_fw *uc_fw,
- int (*xfer)(struct intel_uc_fw *uc_fw))
-{
- int err;
-
- DRM_DEBUG_DRIVER("%s fw load %s\n",
- intel_uc_fw_type_repr(uc_fw->type), uc_fw->path);
-
- if (uc_fw->fetch_status != INTEL_UC_FIRMWARE_SUCCESS)
- return -ENOEXEC;
-
- uc_fw->load_status = INTEL_UC_FIRMWARE_PENDING;
- DRM_DEBUG_DRIVER("%s fw load %s\n",
- intel_uc_fw_type_repr(uc_fw->type),
- intel_uc_fw_status_repr(uc_fw->load_status));
-
- /* Call custom loader */
- intel_uc_fw_ggtt_bind(uc_fw);
- err = xfer(uc_fw);
- intel_uc_fw_ggtt_unbind(uc_fw);
- if (err)
- goto fail;
-
- uc_fw->load_status = INTEL_UC_FIRMWARE_SUCCESS;
- DRM_DEBUG_DRIVER("%s fw load %s\n",
- intel_uc_fw_type_repr(uc_fw->type),
- intel_uc_fw_status_repr(uc_fw->load_status));
-
- DRM_INFO("%s: Loaded firmware %s (version %u.%u)\n",
- intel_uc_fw_type_repr(uc_fw->type),
- uc_fw->path,
- uc_fw->major_ver_found, uc_fw->minor_ver_found);
-
- return 0;
-
-fail:
- uc_fw->load_status = INTEL_UC_FIRMWARE_FAIL;
- DRM_DEBUG_DRIVER("%s fw load %s\n",
- intel_uc_fw_type_repr(uc_fw->type),
- intel_uc_fw_status_repr(uc_fw->load_status));
-
- DRM_WARN("%s: Failed to load firmware %s (error %d)\n",
- intel_uc_fw_type_repr(uc_fw->type), uc_fw->path, err);
-
- return err;
-}
-
-int intel_uc_fw_init(struct intel_uc_fw *uc_fw)
-{
- int err;
-
- if (uc_fw->fetch_status != INTEL_UC_FIRMWARE_SUCCESS)
- return -ENOEXEC;
-
- err = i915_gem_object_pin_pages(uc_fw->obj);
- if (err)
- DRM_DEBUG_DRIVER("%s fw pin-pages err=%d\n",
- intel_uc_fw_type_repr(uc_fw->type), err);
-
- return err;
-}
-
-void intel_uc_fw_fini(struct intel_uc_fw *uc_fw)
-{
- if (uc_fw->fetch_status != INTEL_UC_FIRMWARE_SUCCESS)
- return;
-
- i915_gem_object_unpin_pages(uc_fw->obj);
-}
-
-u32 intel_uc_fw_ggtt_offset(struct intel_uc_fw *uc_fw)
-{
- struct drm_i915_private *i915 = to_i915(uc_fw->obj->base.dev);
- struct i915_ggtt *ggtt = &i915->ggtt;
- struct drm_mm_node *node = &ggtt->uc_fw;
-
- GEM_BUG_ON(!node->allocated);
- GEM_BUG_ON(upper_32_bits(node->start));
- GEM_BUG_ON(upper_32_bits(node->start + node->size - 1));
-
- return lower_32_bits(node->start);
-}
-
-/**
- * intel_uc_fw_cleanup_fetch - cleanup uC firmware
- *
- * @uc_fw: uC firmware
- *
- * Cleans up uC firmware by releasing the firmware GEM obj.
- */
-void intel_uc_fw_cleanup_fetch(struct intel_uc_fw *uc_fw)
-{
- struct drm_i915_gem_object *obj;
-
- obj = fetch_and_zero(&uc_fw->obj);
- if (obj)
- i915_gem_object_put(obj);
-
- uc_fw->fetch_status = INTEL_UC_FIRMWARE_NONE;
-}
-
-/**
- * intel_uc_fw_dump - dump information about uC firmware
- * @uc_fw: uC firmware
- * @p: the &drm_printer
- *
- * Pretty printer for uC firmware.
- */
-void intel_uc_fw_dump(const struct intel_uc_fw *uc_fw, struct drm_printer *p)
-{
- drm_printf(p, "%s firmware: %s\n",
- intel_uc_fw_type_repr(uc_fw->type), uc_fw->path);
- drm_printf(p, "\tstatus: fetch %s, load %s\n",
- intel_uc_fw_status_repr(uc_fw->fetch_status),
- intel_uc_fw_status_repr(uc_fw->load_status));
- drm_printf(p, "\tversion: wanted %u.%u, found %u.%u\n",
- uc_fw->major_ver_wanted, uc_fw->minor_ver_wanted,
- uc_fw->major_ver_found, uc_fw->minor_ver_found);
- drm_printf(p, "\theader: offset %u, size %u\n",
- uc_fw->header_offset, uc_fw->header_size);
- drm_printf(p, "\tuCode: offset %u, size %u\n",
- uc_fw->ucode_offset, uc_fw->ucode_size);
- drm_printf(p, "\tRSA: offset %u, size %u\n",
- uc_fw->rsa_offset, uc_fw->rsa_size);
-}
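/*
 * Illustrative sketch (not part of the patch): the CSS-header size
 * arithmetic from intel_uc_fw_fetch() in the file removed above. All
 * fields are stored in dwords and converted to bytes; the blob is laid
 * out as header | uCode | RSA signature, and the sum must fit inside the
 * file. The struct below is a simplified stand-in for uc_css_header and
 * the sample values are made up.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct fake_css_header {
	uint32_t header_size_dw;	/* includes key/modulus/exponent */
	uint32_t size_dw;		/* header + uCode, in dwords */
	uint32_t modulus_size_dw;
	uint32_t key_size_dw;
	uint32_t exponent_size_dw;
};

static int validate_blob(const struct fake_css_header *css, size_t blob_size)
{
	uint32_t header = (css->header_size_dw - css->modulus_size_dw -
			   css->key_size_dw - css->exponent_size_dw) *
			  sizeof(uint32_t);
	uint32_t ucode = (css->size_dw - css->header_size_dw) * sizeof(uint32_t);
	uint32_t rsa = css->key_size_dw * sizeof(uint32_t);
	size_t need = (size_t)header + ucode + rsa;

	printf("header=%u uCode=%u rsa=%u need=%zu have=%zu\n",
	       header, ucode, rsa, need, blob_size);
	return blob_size < need ? -1 : 0;	/* truncated blob: -ENOEXEC in the driver */
}

int main(void)
{
	struct fake_css_header css = {
		.header_size_dw = 0x80, .size_dw = 0x480,
		.modulus_size_dw = 0x40, .key_size_dw = 0x10,
		.exponent_size_dw = 0x1,
	};

	return validate_blob(&css, 0x1300);
}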
diff --git a/drivers/gpu/drm/i915/intel_uc_fw.h b/drivers/gpu/drm/i915/intel_uc_fw.h
deleted file mode 100644
index ff98f8661d72..000000000000
--- a/drivers/gpu/drm/i915/intel_uc_fw.h
+++ /dev/null
@@ -1,155 +0,0 @@
-/*
- * Copyright © 2014-2017 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
- */
-
-#ifndef _INTEL_UC_FW_H_
-#define _INTEL_UC_FW_H_
-
-struct drm_printer;
-struct drm_i915_private;
-
-/* Home of GuC, HuC and DMC firmwares */
-#define INTEL_UC_FIRMWARE_URL "https://git.kernel.org/pub/scm/linux/kernel/git/firmware/linux-firmware.git/tree/i915"
-
-enum intel_uc_fw_status {
- INTEL_UC_FIRMWARE_FAIL = -1,
- INTEL_UC_FIRMWARE_NONE = 0,
- INTEL_UC_FIRMWARE_PENDING,
- INTEL_UC_FIRMWARE_SUCCESS
-};
-
-enum intel_uc_fw_type {
- INTEL_UC_FW_TYPE_GUC,
- INTEL_UC_FW_TYPE_HUC
-};
-
-/*
- * This structure encapsulates all the data needed during the process
- * of fetching, caching, and loading the firmware image into the uC.
- */
-struct intel_uc_fw {
- const char *path;
- size_t size;
- struct drm_i915_gem_object *obj;
- enum intel_uc_fw_status fetch_status;
- enum intel_uc_fw_status load_status;
-
- /*
- * The firmware build process generates a version header file with the
- * major and minor versions defined. The versions are built into the CSS
- * header of the firmware. The i915 driver sets the minimal firmware
- * version required per platform.
- */
- u16 major_ver_wanted;
- u16 minor_ver_wanted;
- u16 major_ver_found;
- u16 minor_ver_found;
-
- enum intel_uc_fw_type type;
- u32 header_size;
- u32 header_offset;
- u32 rsa_size;
- u32 rsa_offset;
- u32 ucode_size;
- u32 ucode_offset;
-};
-
-static inline
-const char *intel_uc_fw_status_repr(enum intel_uc_fw_status status)
-{
- switch (status) {
- case INTEL_UC_FIRMWARE_FAIL:
- return "FAIL";
- case INTEL_UC_FIRMWARE_NONE:
- return "NONE";
- case INTEL_UC_FIRMWARE_PENDING:
- return "PENDING";
- case INTEL_UC_FIRMWARE_SUCCESS:
- return "SUCCESS";
- }
- return "<invalid>";
-}
-
-static inline const char *intel_uc_fw_type_repr(enum intel_uc_fw_type type)
-{
- switch (type) {
- case INTEL_UC_FW_TYPE_GUC:
- return "GuC";
- case INTEL_UC_FW_TYPE_HUC:
- return "HuC";
- }
- return "uC";
-}
-
-static inline
-void intel_uc_fw_init_early(struct intel_uc_fw *uc_fw,
- enum intel_uc_fw_type type)
-{
- uc_fw->path = NULL;
- uc_fw->fetch_status = INTEL_UC_FIRMWARE_NONE;
- uc_fw->load_status = INTEL_UC_FIRMWARE_NONE;
- uc_fw->type = type;
-}
-
-static inline bool intel_uc_fw_is_selected(struct intel_uc_fw *uc_fw)
-{
- return uc_fw->path != NULL;
-}
-
-static inline bool intel_uc_fw_is_loaded(struct intel_uc_fw *uc_fw)
-{
- return uc_fw->load_status == INTEL_UC_FIRMWARE_SUCCESS;
-}
-
-static inline void intel_uc_fw_sanitize(struct intel_uc_fw *uc_fw)
-{
- if (intel_uc_fw_is_loaded(uc_fw))
- uc_fw->load_status = INTEL_UC_FIRMWARE_PENDING;
-}
-
-/**
- * intel_uc_fw_get_upload_size() - Get the size of the firmware to be uploaded.
- * @uc_fw: uC firmware.
- *
- * Get the size of the firmware and header that will be uploaded to WOPCM.
- *
- * Return: Upload firmware size, or zero on firmware fetch failure.
- */
-static inline u32 intel_uc_fw_get_upload_size(struct intel_uc_fw *uc_fw)
-{
- if (uc_fw->fetch_status != INTEL_UC_FIRMWARE_SUCCESS)
- return 0;
-
- return uc_fw->header_size + uc_fw->ucode_size;
-}
-
-void intel_uc_fw_fetch(struct drm_i915_private *dev_priv,
- struct intel_uc_fw *uc_fw);
-void intel_uc_fw_cleanup_fetch(struct intel_uc_fw *uc_fw);
-int intel_uc_fw_upload(struct intel_uc_fw *uc_fw,
- int (*xfer)(struct intel_uc_fw *uc_fw));
-int intel_uc_fw_init(struct intel_uc_fw *uc_fw);
-void intel_uc_fw_fini(struct intel_uc_fw *uc_fw);
-u32 intel_uc_fw_ggtt_offset(struct intel_uc_fw *uc_fw);
-void intel_uc_fw_dump(const struct intel_uc_fw *uc_fw, struct drm_printer *p);
-
-#endif
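/*
 * Illustrative sketch (not part of the patch): the version policy used by
 * the fetch path that consumed this header: the major version must match
 * exactly, the minor version must be at least the wanted one, and a wanted
 * version of 0.0 means "skip the check". Standalone C, hypothetical
 * function name.
 */
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

static bool fw_version_ok(uint16_t major_wanted, uint16_t minor_wanted,
			  uint16_t major_found, uint16_t minor_found)
{
	if (major_wanted == 0 && minor_wanted == 0)
		return true;	/* version check explicitly skipped */

	return major_found == major_wanted && minor_found >= minor_wanted;
}

int main(void)
{
	assert(fw_version_ok(0, 0, 9, 33));	/* check skipped */
	assert(fw_version_ok(32, 0, 32, 5));	/* newer minor is fine */
	assert(!fw_version_ok(32, 0, 33, 0));	/* major must match exactly */
	return 0;
}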
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index da33aa672c3d..9e583f13a9e4 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -25,8 +25,8 @@
#include <asm/iosf_mbi.h>
#include "i915_drv.h"
+#include "i915_trace.h"
#include "i915_vgpu.h"
-#include "intel_drv.h"
#include "intel_pm.h"
#define FORCEWAKE_ACK_TIMEOUT_MS 50
@@ -34,6 +34,32 @@
#define __raw_posting_read(...) ((void)__raw_uncore_read32(__VA_ARGS__))
+void
+intel_uncore_mmio_debug_init_early(struct intel_uncore_mmio_debug *mmio_debug)
+{
+ spin_lock_init(&mmio_debug->lock);
+ mmio_debug->unclaimed_mmio_check = 1;
+}
+
+static void mmio_debug_suspend(struct intel_uncore_mmio_debug *mmio_debug)
+{
+ lockdep_assert_held(&mmio_debug->lock);
+
+ /* Save and disable mmio debugging for the user bypass */
+ if (!mmio_debug->suspend_count++) {
+ mmio_debug->saved_mmio_check = mmio_debug->unclaimed_mmio_check;
+ mmio_debug->unclaimed_mmio_check = 0;
+ }
+}
+
+static void mmio_debug_resume(struct intel_uncore_mmio_debug *mmio_debug)
+{
+ lockdep_assert_held(&mmio_debug->lock);
+
+ if (!--mmio_debug->suspend_count)
+ mmio_debug->unclaimed_mmio_check = mmio_debug->saved_mmio_check;
+}
+
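/*
 * Illustrative sketch (not part of the patch): the save/disable/restore
 * nesting pattern used by mmio_debug_suspend()/mmio_debug_resume() above.
 * Only the outermost suspend saves and clears the setting; only the
 * matching outermost resume restores it, so nested users compose safely.
 * Standalone C with hypothetical names.
 */
#include <assert.h>

struct fake_debug {
	int suspend_count;
	int saved_check;
	int check;	/* 1 = unclaimed-mmio checking enabled */
};

static void debug_suspend(struct fake_debug *d)
{
	if (!d->suspend_count++) {	/* first (outermost) suspend */
		d->saved_check = d->check;
		d->check = 0;
	}
}

static void debug_resume(struct fake_debug *d)
{
	if (!--d->suspend_count)	/* last (outermost) resume */
		d->check = d->saved_check;
}

int main(void)
{
	struct fake_debug d = { .check = 1 };

	debug_suspend(&d);
	debug_suspend(&d);	/* nested: no extra save */
	assert(d.check == 0);
	debug_resume(&d);
	assert(d.check == 0);	/* still suspended by the outer caller */
	debug_resume(&d);
	assert(d.check == 1);	/* restored */
	return 0;
}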
static const char * const forcewake_domain_names[] = {
"render",
"blitter",
@@ -78,6 +104,8 @@ fw_domain_reset(const struct intel_uncore_forcewake_domain *d)
static inline void
fw_domain_arm_timer(struct intel_uncore_forcewake_domain *d)
{
+ GEM_BUG_ON(d->uncore->fw_domains_timer & d->mask);
+ d->uncore->fw_domains_timer |= d->mask;
d->wake_count++;
hrtimer_start_range_ns(&d->timer,
NSEC_PER_MSEC,
@@ -322,7 +350,7 @@ static void __gen6_gt_wait_for_fifo(struct intel_uncore *uncore)
/* On VLV, FIFO will be shared by both SW and HW.
* So, we need to read the FREE_ENTRIES every time */
- if (IS_VALLEYVIEW(uncore_to_i915(uncore)))
+ if (IS_VALLEYVIEW(uncore->i915))
n = fifo_free_entries(uncore);
else
n = uncore->fifo_count;
@@ -344,7 +372,7 @@ intel_uncore_fw_release_timer(struct hrtimer *timer)
{
struct intel_uncore_forcewake_domain *domain =
container_of(timer, struct intel_uncore_forcewake_domain, timer);
- struct intel_uncore *uncore = forcewake_domain_to_uncore(domain);
+ struct intel_uncore *uncore = domain->uncore;
unsigned long irqflags;
assert_rpm_device_not_suspended(uncore->rpm);
@@ -353,9 +381,10 @@ intel_uncore_fw_release_timer(struct hrtimer *timer)
return HRTIMER_RESTART;
spin_lock_irqsave(&uncore->lock, irqflags);
- if (WARN_ON(domain->wake_count == 0))
- domain->wake_count++;
+ uncore->fw_domains_timer &= ~domain->mask;
+
+ GEM_BUG_ON(!domain->wake_count);
if (--domain->wake_count == 0)
uncore->funcs.force_wake_put(uncore, domain->mask);
@@ -473,6 +502,11 @@ check_for_unclaimed_mmio(struct intel_uncore *uncore)
{
bool ret = false;
+ lockdep_assert_held(&uncore->debug->lock);
+
+ if (uncore->debug->suspend_count)
+ return false;
+
if (intel_uncore_has_fpga_dbg_unclaimed(uncore))
ret |= fpga_check_for_unclaimed_mmio(uncore);
@@ -485,15 +519,13 @@ check_for_unclaimed_mmio(struct intel_uncore *uncore)
return ret;
}
-static void __intel_uncore_early_sanitize(struct intel_uncore *uncore,
- unsigned int restore_forcewake)
+static void forcewake_early_sanitize(struct intel_uncore *uncore,
+ unsigned int restore_forcewake)
{
- /* clear out unclaimed reg detection bit */
- if (check_for_unclaimed_mmio(uncore))
- DRM_DEBUG("unclaimed mmio detected on uncore init, clearing\n");
+ GEM_BUG_ON(!intel_uncore_has_forcewake(uncore));
/* WaDisableShadowRegForCpd:chv */
- if (IS_CHERRYVIEW(uncore_to_i915(uncore))) {
+ if (IS_CHERRYVIEW(uncore->i915)) {
__raw_uncore_write32(uncore, GTFIFOCTL,
__raw_uncore_read32(uncore, GTFIFOCTL) |
GT_FIFO_CTL_BLOCK_ALL_POLICY_STALL |
@@ -515,6 +547,9 @@ static void __intel_uncore_early_sanitize(struct intel_uncore *uncore,
void intel_uncore_suspend(struct intel_uncore *uncore)
{
+ if (!intel_uncore_has_forcewake(uncore))
+ return;
+
iosf_mbi_punit_acquire();
iosf_mbi_unregister_pmic_bus_access_notifier_unlocked(
&uncore->pmic_bus_access_nb);
@@ -526,21 +561,24 @@ void intel_uncore_resume_early(struct intel_uncore *uncore)
{
unsigned int restore_forcewake;
+ if (intel_uncore_unclaimed_mmio(uncore))
+ DRM_DEBUG("unclaimed mmio detected on resume, clearing\n");
+
+ if (!intel_uncore_has_forcewake(uncore))
+ return;
+
restore_forcewake = fetch_and_zero(&uncore->fw_domains_saved);
- __intel_uncore_early_sanitize(uncore, restore_forcewake);
+ forcewake_early_sanitize(uncore, restore_forcewake);
iosf_mbi_register_pmic_bus_access_notifier(&uncore->pmic_bus_access_nb);
}
void intel_uncore_runtime_resume(struct intel_uncore *uncore)
{
- iosf_mbi_register_pmic_bus_access_notifier(&uncore->pmic_bus_access_nb);
-}
+ if (!intel_uncore_has_forcewake(uncore))
+ return;
-void intel_uncore_sanitize(struct drm_i915_private *dev_priv)
-{
- /* BIOS often leaves RC6 enabled, but disable it for hw init */
- intel_sanitize_gt_powersave(dev_priv);
+ iosf_mbi_register_pmic_bus_access_notifier(&uncore->pmic_bus_access_nb);
}
static void __intel_uncore_forcewake_get(struct intel_uncore *uncore,
@@ -601,17 +639,11 @@ void intel_uncore_forcewake_get(struct intel_uncore *uncore,
void intel_uncore_forcewake_user_get(struct intel_uncore *uncore)
{
spin_lock_irq(&uncore->lock);
- if (!uncore->user_forcewake.count++) {
+ if (!uncore->user_forcewake_count++) {
intel_uncore_forcewake_get__locked(uncore, FORCEWAKE_ALL);
-
- /* Save and disable mmio debugging for the user bypass */
- uncore->user_forcewake.saved_mmio_check =
- uncore->unclaimed_mmio_check;
- uncore->user_forcewake.saved_mmio_debug =
- i915_modparams.mmio_debug;
-
- uncore->unclaimed_mmio_check = 0;
- i915_modparams.mmio_debug = 0;
+ spin_lock(&uncore->debug->lock);
+ mmio_debug_suspend(uncore->debug);
+ spin_unlock(&uncore->debug->lock);
}
spin_unlock_irq(&uncore->lock);
}
@@ -626,15 +658,14 @@ void intel_uncore_forcewake_user_get(struct intel_uncore *uncore)
void intel_uncore_forcewake_user_put(struct intel_uncore *uncore)
{
spin_lock_irq(&uncore->lock);
- if (!--uncore->user_forcewake.count) {
- if (intel_uncore_unclaimed_mmio(uncore))
- dev_info(uncore_to_i915(uncore)->drm.dev,
- "Invalid mmio detected during user access\n");
+ if (!--uncore->user_forcewake_count) {
+ spin_lock(&uncore->debug->lock);
+ mmio_debug_resume(uncore->debug);
- uncore->unclaimed_mmio_check =
- uncore->user_forcewake.saved_mmio_check;
- i915_modparams.mmio_debug =
- uncore->user_forcewake.saved_mmio_debug;
+ if (check_for_unclaimed_mmio(uncore))
+ dev_info(uncore->i915->drm.dev,
+ "Invalid mmio detected during user access\n");
+ spin_unlock(&uncore->debug->lock);
intel_uncore_forcewake_put__locked(uncore, FORCEWAKE_ALL);
}
@@ -669,8 +700,7 @@ static void __intel_uncore_forcewake_put(struct intel_uncore *uncore,
fw_domains &= uncore->fw_domains;
for_each_fw_domain_masked(domain, fw_domains, uncore, tmp) {
- if (WARN_ON(domain->wake_count == 0))
- continue;
+ GEM_BUG_ON(!domain->wake_count);
if (--domain->wake_count) {
domain->active = true;
@@ -734,15 +764,42 @@ void assert_forcewakes_inactive(struct intel_uncore *uncore)
void assert_forcewakes_active(struct intel_uncore *uncore,
enum forcewake_domains fw_domains)
{
+ struct intel_uncore_forcewake_domain *domain;
+ unsigned int tmp;
+
+ if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM))
+ return;
+
if (!uncore->funcs.force_wake_get)
return;
+ spin_lock_irq(&uncore->lock);
+
assert_rpm_wakelock_held(uncore->rpm);
fw_domains &= uncore->fw_domains;
WARN(fw_domains & ~uncore->fw_domains_active,
"Expected %08x fw_domains to be active, but %08x are off\n",
fw_domains, fw_domains & ~uncore->fw_domains_active);
+
+ /*
+ * Check that the caller has an explicit wakeref and we don't mistake
+ * it for the auto wakeref.
+ */
+ for_each_fw_domain_masked(domain, fw_domains, uncore, tmp) {
+ unsigned int actual = READ_ONCE(domain->wake_count);
+ unsigned int expect = 1;
+
+ if (uncore->fw_domains_timer & domain->mask)
+ expect++; /* pending automatic release */
+
+ if (WARN(actual < expect,
+ "Expected domain %d to be held awake by caller, count=%d\n",
+ domain->id, actual))
+ break;
+ }
+
+ spin_unlock_irq(&uncore->lock);
}
/* We give fast paths for the really cool registers */
@@ -901,6 +958,12 @@ static bool is_gen##x##_shadowed(u32 offset) \
__is_genX_shadowed(8)
__is_genX_shadowed(11)
+static enum forcewake_domains
+gen6_reg_write_fw_domains(struct intel_uncore *uncore, i915_reg_t reg)
+{
+ return FORCEWAKE_RENDER;
+}
+
#define __gen8_reg_write_fw_domains(uncore, offset) \
({ \
enum forcewake_domains __fwd; \
@@ -1049,7 +1112,16 @@ unclaimed_reg_debug(struct intel_uncore *uncore,
if (likely(!i915_modparams.mmio_debug))
return;
+ /* interrupts are disabled and re-enabled around uncore->lock usage */
+ lockdep_assert_held(&uncore->lock);
+
+ if (before)
+ spin_lock(&uncore->debug->lock);
+
__unclaimed_reg_debug(uncore, reg, read, before);
+
+ if (!before)
+ spin_unlock(&uncore->debug->lock);
}
#define GEN2_READ_HEADER(x) \
@@ -1123,8 +1195,7 @@ static noinline void ___force_wake_auto(struct intel_uncore *uncore,
static inline void __force_wake_auto(struct intel_uncore *uncore,
enum forcewake_domains fw_domains)
{
- if (WARN_ON(!fw_domains))
- return;
+ GEM_BUG_ON(!fw_domains);
/* Turn on all requested but inactive supported forcewake domains. */
fw_domains &= uncore->fw_domains;
@@ -1145,26 +1216,23 @@ func##_read##x(struct intel_uncore *uncore, i915_reg_t reg, bool trace) { \
val = __raw_uncore_read##x(uncore, reg); \
GEN6_READ_FOOTER; \
}
-#define __gen6_read(x) __gen_read(gen6, x)
-#define __fwtable_read(x) __gen_read(fwtable, x)
-#define __gen11_fwtable_read(x) __gen_read(gen11_fwtable, x)
-
-__gen11_fwtable_read(8)
-__gen11_fwtable_read(16)
-__gen11_fwtable_read(32)
-__gen11_fwtable_read(64)
-__fwtable_read(8)
-__fwtable_read(16)
-__fwtable_read(32)
-__fwtable_read(64)
-__gen6_read(8)
-__gen6_read(16)
-__gen6_read(32)
-__gen6_read(64)
-
-#undef __gen11_fwtable_read
-#undef __fwtable_read
-#undef __gen6_read
+
+#define __gen_reg_read_funcs(func) \
+static enum forcewake_domains \
+func##_reg_read_fw_domains(struct intel_uncore *uncore, i915_reg_t reg) { \
+ return __##func##_reg_read_fw_domains(uncore, i915_mmio_reg_offset(reg)); \
+} \
+\
+__gen_read(func, 8) \
+__gen_read(func, 16) \
+__gen_read(func, 32) \
+__gen_read(func, 64)
+
+__gen_reg_read_funcs(gen11_fwtable);
+__gen_reg_read_funcs(fwtable);
+__gen_reg_read_funcs(gen6);
+
+#undef __gen_reg_read_funcs
#undef GEN6_READ_FOOTER
#undef GEN6_READ_HEADER
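/*
 * Illustrative sketch (not part of the patch): the token-pasting pattern
 * the refactor above converges on. One macro stamps out a family of
 * accessors per prefix, instead of maintaining per-size macro aliases for
 * each prefix. Standalone C with hypothetical names; the bodies are
 * stand-ins for MMIO reads.
 */
#include <stdint.h>
#include <stdio.h>

#define DEFINE_READERS(func)				\
static uint8_t func##_read8(uint32_t reg)		\
{							\
	return (uint8_t)reg;	/* fake MMIO read */	\
}							\
static uint32_t func##_read32(uint32_t reg)		\
{							\
	return reg;					\
}

DEFINE_READERS(gen6)
DEFINE_READERS(fwtable)

int main(void)
{
	printf("%u %u\n", (unsigned)gen6_read8(0x1234),
	       (unsigned)fwtable_read32(0x1234));
	return 0;
}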
@@ -1225,6 +1293,9 @@ gen6_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace)
__raw_uncore_write##x(uncore, reg, val); \
GEN6_WRITE_FOOTER; \
}
+__gen6_write(8)
+__gen6_write(16)
+__gen6_write(32)
#define __gen_write(func, x) \
static void \
@@ -1237,38 +1308,33 @@ func##_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trac
__raw_uncore_write##x(uncore, reg, val); \
GEN6_WRITE_FOOTER; \
}
-#define __gen8_write(x) __gen_write(gen8, x)
-#define __fwtable_write(x) __gen_write(fwtable, x)
-#define __gen11_fwtable_write(x) __gen_write(gen11_fwtable, x)
-
-__gen11_fwtable_write(8)
-__gen11_fwtable_write(16)
-__gen11_fwtable_write(32)
-__fwtable_write(8)
-__fwtable_write(16)
-__fwtable_write(32)
-__gen8_write(8)
-__gen8_write(16)
-__gen8_write(32)
-__gen6_write(8)
-__gen6_write(16)
-__gen6_write(32)
-#undef __gen11_fwtable_write
-#undef __fwtable_write
-#undef __gen8_write
-#undef __gen6_write
+#define __gen_reg_write_funcs(func) \
+static enum forcewake_domains \
+func##_reg_write_fw_domains(struct intel_uncore *uncore, i915_reg_t reg) { \
+ return __##func##_reg_write_fw_domains(uncore, i915_mmio_reg_offset(reg)); \
+} \
+\
+__gen_write(func, 8) \
+__gen_write(func, 16) \
+__gen_write(func, 32)
+
+__gen_reg_write_funcs(gen11_fwtable);
+__gen_reg_write_funcs(fwtable);
+__gen_reg_write_funcs(gen8);
+
+#undef __gen_reg_write_funcs
#undef GEN6_WRITE_FOOTER
#undef GEN6_WRITE_HEADER
-#define ASSIGN_WRITE_MMIO_VFUNCS(uncore, x) \
+#define ASSIGN_RAW_WRITE_MMIO_VFUNCS(uncore, x) \
do { \
(uncore)->funcs.mmio_writeb = x##_write8; \
(uncore)->funcs.mmio_writew = x##_write16; \
(uncore)->funcs.mmio_writel = x##_write32; \
} while (0)
-#define ASSIGN_READ_MMIO_VFUNCS(uncore, x) \
+#define ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, x) \
do { \
(uncore)->funcs.mmio_readb = x##_read8; \
(uncore)->funcs.mmio_readw = x##_read16; \
@@ -1276,24 +1342,39 @@ do { \
(uncore)->funcs.mmio_readq = x##_read64; \
} while (0)
+#define ASSIGN_WRITE_MMIO_VFUNCS(uncore, x) \
+do { \
+ ASSIGN_RAW_WRITE_MMIO_VFUNCS((uncore), x); \
+ (uncore)->funcs.write_fw_domains = x##_reg_write_fw_domains; \
+} while (0)
-static void fw_domain_init(struct intel_uncore *uncore,
- enum forcewake_domain_id domain_id,
- i915_reg_t reg_set,
- i915_reg_t reg_ack)
+#define ASSIGN_READ_MMIO_VFUNCS(uncore, x) \
+do { \
+ ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, x); \
+ (uncore)->funcs.read_fw_domains = x##_reg_read_fw_domains; \
+} while (0)
+
+static int __fw_domain_init(struct intel_uncore *uncore,
+ enum forcewake_domain_id domain_id,
+ i915_reg_t reg_set,
+ i915_reg_t reg_ack)
{
struct intel_uncore_forcewake_domain *d;
- if (WARN_ON(domain_id >= FW_DOMAIN_ID_COUNT))
- return;
+ GEM_BUG_ON(domain_id >= FW_DOMAIN_ID_COUNT);
+ GEM_BUG_ON(uncore->fw_domain[domain_id]);
- d = &uncore->fw_domain[domain_id];
+ if (i915_inject_probe_failure(uncore->i915))
+ return -ENOMEM;
- WARN_ON(d->wake_count);
+ d = kzalloc(sizeof(*d), GFP_KERNEL);
+ if (!d)
+ return -ENOMEM;
WARN_ON(!i915_mmio_reg_valid(reg_set));
WARN_ON(!i915_mmio_reg_valid(reg_ack));
+ d->uncore = uncore;
d->wake_count = 0;
d->reg_set = uncore->regs + i915_mmio_reg_offset(reg_set);
d->reg_ack = uncore->regs + i915_mmio_reg_offset(reg_ack);
@@ -1310,7 +1391,6 @@ static void fw_domain_init(struct intel_uncore *uncore,
BUILD_BUG_ON(FORCEWAKE_MEDIA_VEBOX0 != (1 << FW_DOMAIN_ID_MEDIA_VEBOX0));
BUILD_BUG_ON(FORCEWAKE_MEDIA_VEBOX1 != (1 << FW_DOMAIN_ID_MEDIA_VEBOX1));
-
d->mask = BIT(domain_id);
hrtimer_init(&d->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
@@ -1319,6 +1399,10 @@ static void fw_domain_init(struct intel_uncore *uncore,
uncore->fw_domains |= BIT(domain_id);
fw_domain_reset(d);
+
+ uncore->fw_domain[domain_id] = d;
+
+ return 0;
}
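/*
 * Illustrative sketch (not part of the patch): countdown-style fault
 * injection as used via i915_inject_probe_failure() in the allocation
 * above, where the Nth checkpoint reached during probe reports failure so
 * error-unwind paths can be exercised. This standalone model is an
 * assumption about the mechanism's shape, not a copy of the driver's
 * implementation.
 */
#include <stdio.h>

static int inject_after;	/* 0 = injection disabled */

static int inject_probe_failure(void)
{
	if (!inject_after)
		return 0;

	if (!--inject_after) {
		fprintf(stderr, "injecting probe failure\n");
		return -12;	/* -ENOMEM, matching the hunk above */
	}
	return 0;
}

int main(void)
{
	inject_after = 2;	/* fail at the second checkpoint */

	for (int i = 0; i < 3; i++)
		printf("checkpoint %d -> %d\n", i, inject_probe_failure());
	return 0;
}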
static void fw_domain_fini(struct intel_uncore *uncore,
@@ -1326,30 +1410,41 @@ static void fw_domain_fini(struct intel_uncore *uncore,
{
struct intel_uncore_forcewake_domain *d;
- if (WARN_ON(domain_id >= FW_DOMAIN_ID_COUNT))
- return;
+ GEM_BUG_ON(domain_id >= FW_DOMAIN_ID_COUNT);
- d = &uncore->fw_domain[domain_id];
+ d = fetch_and_zero(&uncore->fw_domain[domain_id]);
+ if (!d)
+ return;
+ uncore->fw_domains &= ~BIT(domain_id);
WARN_ON(d->wake_count);
WARN_ON(hrtimer_cancel(&d->timer));
- memset(d, 0, sizeof(*d));
+ kfree(d);
+}
- uncore->fw_domains &= ~BIT(domain_id);
+static void intel_uncore_fw_domains_fini(struct intel_uncore *uncore)
+{
+ struct intel_uncore_forcewake_domain *d;
+ int tmp;
+
+ for_each_fw_domain(d, uncore, tmp)
+ fw_domain_fini(uncore, d->id);
}
-static void intel_uncore_fw_domains_init(struct intel_uncore *uncore)
+static int intel_uncore_fw_domains_init(struct intel_uncore *uncore)
{
- struct drm_i915_private *i915 = uncore_to_i915(uncore);
+ struct drm_i915_private *i915 = uncore->i915;
+ int ret = 0;
- if (!intel_uncore_has_forcewake(uncore))
- return;
+ GEM_BUG_ON(!intel_uncore_has_forcewake(uncore));
+
+#define fw_domain_init(uncore__, id__, set__, ack__) \
+ (ret ?: (ret = __fw_domain_init((uncore__), (id__), (set__), (ack__))))
if (INTEL_GEN(i915) >= 11) {
int i;
- uncore->funcs.force_wake_get =
- fw_domains_get_with_fallback;
+ uncore->funcs.force_wake_get = fw_domains_get_with_fallback;
uncore->funcs.force_wake_put = fw_domains_put;
fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
FORCEWAKE_RENDER_GEN9,
@@ -1357,6 +1452,7 @@ static void intel_uncore_fw_domains_init(struct intel_uncore *uncore)
fw_domain_init(uncore, FW_DOMAIN_ID_BLITTER,
FORCEWAKE_BLITTER_GEN9,
FORCEWAKE_ACK_BLITTER_GEN9);
+
for (i = 0; i < I915_MAX_VCS; i++) {
if (!HAS_ENGINE(i915, _VCS(i)))
continue;
@@ -1374,8 +1470,7 @@ static void intel_uncore_fw_domains_init(struct intel_uncore *uncore)
FORCEWAKE_ACK_MEDIA_VEBOX_GEN11(i));
}
} else if (IS_GEN_RANGE(i915, 9, 10)) {
- uncore->funcs.force_wake_get =
- fw_domains_get_with_fallback;
+ uncore->funcs.force_wake_get = fw_domains_get_with_fallback;
uncore->funcs.force_wake_put = fw_domains_put;
fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
FORCEWAKE_RENDER_GEN9,
@@ -1424,8 +1519,10 @@ static void intel_uncore_fw_domains_init(struct intel_uncore *uncore)
__raw_uncore_write32(uncore, FORCEWAKE, 0);
__raw_posting_read(uncore, ECOBUS);
- fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
- FORCEWAKE_MT, FORCEWAKE_MT_ACK);
+ ret = __fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
+ FORCEWAKE_MT, FORCEWAKE_MT_ACK);
+ if (ret)
+ goto out;
spin_lock_irq(&uncore->lock);
fw_domains_get_with_thread_status(uncore, FORCEWAKE_RENDER);
@@ -1436,6 +1533,7 @@ static void intel_uncore_fw_domains_init(struct intel_uncore *uncore)
if (!(ecobus & FORCEWAKE_MT_ENABLE)) {
DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n");
DRM_INFO("when using vblank-synced partial screen updates.\n");
+ fw_domain_fini(uncore, FW_DOMAIN_ID_RENDER);
fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
FORCEWAKE, FORCEWAKE_ACK);
}
@@ -1447,8 +1545,16 @@ static void intel_uncore_fw_domains_init(struct intel_uncore *uncore)
FORCEWAKE, FORCEWAKE_ACK);
}
+#undef fw_domain_init
+
/* All future platforms are expected to require complex power gating */
- WARN_ON(uncore->fw_domains == 0);
+ WARN_ON(!ret && uncore->fw_domains == 0);
+
+out:
+ if (ret)
+ intel_uncore_fw_domains_fini(uncore);
+
+ return ret;
}
#define ASSIGN_FW_DOMAINS_TABLE(uncore, d) \
@@ -1493,7 +1599,7 @@ static int i915_pmic_bus_access_notifier(struct notifier_block *nb,
static int uncore_mmio_setup(struct intel_uncore *uncore)
{
- struct drm_i915_private *i915 = uncore_to_i915(uncore);
+ struct drm_i915_private *i915 = uncore->i915;
struct pci_dev *pdev = i915->drm.pdev;
int mmio_bar;
int mmio_size;
@@ -1523,49 +1629,46 @@ static int uncore_mmio_setup(struct intel_uncore *uncore)
static void uncore_mmio_cleanup(struct intel_uncore *uncore)
{
- struct drm_i915_private *i915 = uncore_to_i915(uncore);
- struct pci_dev *pdev = i915->drm.pdev;
+ struct pci_dev *pdev = uncore->i915->drm.pdev;
pci_iounmap(pdev, uncore->regs);
}
-void intel_uncore_init_early(struct intel_uncore *uncore)
+void intel_uncore_init_early(struct intel_uncore *uncore,
+ struct drm_i915_private *i915)
{
spin_lock_init(&uncore->lock);
+ uncore->i915 = i915;
+ uncore->rpm = &i915->runtime_pm;
+ uncore->debug = &i915->mmio_debug;
}
-int intel_uncore_init_mmio(struct intel_uncore *uncore)
+static void uncore_raw_init(struct intel_uncore *uncore)
{
- struct drm_i915_private *i915 = uncore_to_i915(uncore);
- int ret;
+ GEM_BUG_ON(intel_uncore_has_forcewake(uncore));
- ret = uncore_mmio_setup(uncore);
- if (ret)
- return ret;
-
- i915_check_vgpu(i915);
-
- if (INTEL_GEN(i915) > 5 && !intel_vgpu_active(i915))
- uncore->flags |= UNCORE_HAS_FORCEWAKE;
+ if (IS_GEN(uncore->i915, 5)) {
+ ASSIGN_RAW_WRITE_MMIO_VFUNCS(uncore, gen5);
+ ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, gen5);
+ } else {
+ ASSIGN_RAW_WRITE_MMIO_VFUNCS(uncore, gen2);
+ ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, gen2);
+ }
+}
- intel_uncore_fw_domains_init(uncore);
- __intel_uncore_early_sanitize(uncore, 0);
+static int uncore_forcewake_init(struct intel_uncore *uncore)
+{
+ struct drm_i915_private *i915 = uncore->i915;
+ int ret;
- uncore->unclaimed_mmio_check = 1;
- uncore->pmic_bus_access_nb.notifier_call =
- i915_pmic_bus_access_notifier;
+ GEM_BUG_ON(!intel_uncore_has_forcewake(uncore));
- uncore->rpm = &i915->runtime_pm;
+ ret = intel_uncore_fw_domains_init(uncore);
+ if (ret)
+ return ret;
+ forcewake_early_sanitize(uncore, 0);
- if (!intel_uncore_has_forcewake(uncore)) {
- if (IS_GEN(i915, 5)) {
- ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen5);
- ASSIGN_READ_MMIO_VFUNCS(uncore, gen5);
- } else {
- ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen2);
- ASSIGN_READ_MMIO_VFUNCS(uncore, gen2);
- }
- } else if (IS_GEN_RANGE(i915, 6, 7)) {
+ if (IS_GEN_RANGE(i915, 6, 7)) {
ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen6);
if (IS_VALLEYVIEW(i915)) {
@@ -1579,7 +1682,6 @@ int intel_uncore_init_mmio(struct intel_uncore *uncore)
ASSIGN_FW_DOMAINS_TABLE(uncore, __chv_fw_ranges);
ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
ASSIGN_READ_MMIO_VFUNCS(uncore, fwtable);
-
} else {
ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen8);
ASSIGN_READ_MMIO_VFUNCS(uncore, gen6);
@@ -1594,6 +1696,38 @@ int intel_uncore_init_mmio(struct intel_uncore *uncore)
ASSIGN_READ_MMIO_VFUNCS(uncore, gen11_fwtable);
}
+ uncore->pmic_bus_access_nb.notifier_call = i915_pmic_bus_access_notifier;
+ iosf_mbi_register_pmic_bus_access_notifier(&uncore->pmic_bus_access_nb);
+
+ return 0;
+}
+
+int intel_uncore_init_mmio(struct intel_uncore *uncore)
+{
+ struct drm_i915_private *i915 = uncore->i915;
+ int ret;
+
+ ret = uncore_mmio_setup(uncore);
+ if (ret)
+ return ret;
+
+ if (INTEL_GEN(i915) > 5 && !intel_vgpu_active(i915))
+ uncore->flags |= UNCORE_HAS_FORCEWAKE;
+
+ if (!intel_uncore_has_forcewake(uncore)) {
+ uncore_raw_init(uncore);
+ } else {
+ ret = uncore_forcewake_init(uncore);
+ if (ret)
+ goto out_mmio_cleanup;
+ }
+
+ /* make sure fw funcs are set if and only if we have fw */
+ GEM_BUG_ON(intel_uncore_has_forcewake(uncore) != !!uncore->funcs.force_wake_get);
+ GEM_BUG_ON(intel_uncore_has_forcewake(uncore) != !!uncore->funcs.force_wake_put);
+ GEM_BUG_ON(intel_uncore_has_forcewake(uncore) != !!uncore->funcs.read_fw_domains);
+ GEM_BUG_ON(intel_uncore_has_forcewake(uncore) != !!uncore->funcs.write_fw_domains);
+
if (HAS_FPGA_DBG_UNCLAIMED(i915))
uncore->flags |= UNCORE_HAS_FPGA_DBG_UNCLAIMED;
@@ -1603,9 +1737,16 @@ int intel_uncore_init_mmio(struct intel_uncore *uncore)
if (IS_GEN_RANGE(i915, 6, 7))
uncore->flags |= UNCORE_HAS_FIFO;
- iosf_mbi_register_pmic_bus_access_notifier(&uncore->pmic_bus_access_nb);
+ /* clear out unclaimed reg detection bit */
+ if (intel_uncore_unclaimed_mmio(uncore))
+ DRM_DEBUG("unclaimed mmio detected on uncore init, clearing\n");
return 0;
+
+out_mmio_cleanup:
+ uncore_mmio_cleanup(uncore);
+
+ return ret;
}
/*
@@ -1615,45 +1756,46 @@ int intel_uncore_init_mmio(struct intel_uncore *uncore)
*/
void intel_uncore_prune_mmio_domains(struct intel_uncore *uncore)
{
- struct drm_i915_private *i915 = uncore_to_i915(uncore);
+ struct drm_i915_private *i915 = uncore->i915;
+ enum forcewake_domains fw_domains = uncore->fw_domains;
+ enum forcewake_domain_id domain_id;
+ int i;
- if (INTEL_GEN(i915) >= 11) {
- enum forcewake_domains fw_domains = uncore->fw_domains;
- enum forcewake_domain_id domain_id;
- int i;
+ if (!intel_uncore_has_forcewake(uncore) || INTEL_GEN(i915) < 11)
+ return;
- for (i = 0; i < I915_MAX_VCS; i++) {
- domain_id = FW_DOMAIN_ID_MEDIA_VDBOX0 + i;
+ for (i = 0; i < I915_MAX_VCS; i++) {
+ domain_id = FW_DOMAIN_ID_MEDIA_VDBOX0 + i;
- if (HAS_ENGINE(i915, _VCS(i)))
- continue;
+ if (HAS_ENGINE(i915, _VCS(i)))
+ continue;
- if (fw_domains & BIT(domain_id))
- fw_domain_fini(uncore, domain_id);
- }
+ if (fw_domains & BIT(domain_id))
+ fw_domain_fini(uncore, domain_id);
+ }
- for (i = 0; i < I915_MAX_VECS; i++) {
- domain_id = FW_DOMAIN_ID_MEDIA_VEBOX0 + i;
+ for (i = 0; i < I915_MAX_VECS; i++) {
+ domain_id = FW_DOMAIN_ID_MEDIA_VEBOX0 + i;
- if (HAS_ENGINE(i915, _VECS(i)))
- continue;
+ if (HAS_ENGINE(i915, _VECS(i)))
+ continue;
- if (fw_domains & BIT(domain_id))
- fw_domain_fini(uncore, domain_id);
- }
+ if (fw_domains & BIT(domain_id))
+ fw_domain_fini(uncore, domain_id);
}
}
void intel_uncore_fini_mmio(struct intel_uncore *uncore)
{
- /* Paranoia: make sure we have disabled everything before we exit. */
- intel_uncore_sanitize(uncore_to_i915(uncore));
+ if (intel_uncore_has_forcewake(uncore)) {
+ iosf_mbi_punit_acquire();
+ iosf_mbi_unregister_pmic_bus_access_notifier_unlocked(
+ &uncore->pmic_bus_access_nb);
+ intel_uncore_forcewake_reset(uncore);
+ intel_uncore_fw_domains_fini(uncore);
+ iosf_mbi_punit_release();
+ }
- iosf_mbi_punit_acquire();
- iosf_mbi_unregister_pmic_bus_access_notifier_unlocked(
- &uncore->pmic_bus_access_nb);
- intel_uncore_forcewake_reset(uncore);
- iosf_mbi_punit_release();
uncore_mmio_cleanup(uncore);
}
@@ -1665,7 +1807,7 @@ static const struct reg_whitelist {
} reg_read_whitelist[] = { {
.offset_ldw = RING_TIMESTAMP(RENDER_RING_BASE),
.offset_udw = RING_TIMESTAMP_UDW(RENDER_RING_BASE),
- .gen_mask = INTEL_GEN_MASK(4, 11),
+ .gen_mask = INTEL_GEN_MASK(4, 12),
.size = 8
} };
@@ -1749,7 +1891,7 @@ int i915_reg_read_ioctl(struct drm_device *dev,
* wish to wait without holding forcewake for the duration (i.e. you expect
* the wait to be slow).
*
- * Returns 0 if the register matches the desired condition, or -ETIMEOUT.
+ * Return: 0 if the register matches the desired condition, or -ETIMEDOUT.
*/
int __intel_wait_for_register_fw(struct intel_uncore *uncore,
i915_reg_t reg,
@@ -1797,7 +1939,7 @@ int __intel_wait_for_register_fw(struct intel_uncore *uncore,
*
* Otherwise, the wait will timeout after @timeout_ms milliseconds.
*
- * Returns 0 if the register matches the desired condition, or -ETIMEOUT.
+ * Return: 0 if the register matches the desired condition, or -ETIMEDOUT.
*/
int __intel_wait_for_register(struct intel_uncore *uncore,
i915_reg_t reg,
@@ -1841,7 +1983,13 @@ int __intel_wait_for_register(struct intel_uncore *uncore,
bool intel_uncore_unclaimed_mmio(struct intel_uncore *uncore)
{
- return check_for_unclaimed_mmio(uncore);
+ bool ret;
+
+ spin_lock_irq(&uncore->debug->lock);
+ ret = check_for_unclaimed_mmio(uncore);
+ spin_unlock_irq(&uncore->debug->lock);
+
+ return ret;
}
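
intel_uncore_unclaimed_mmio() now serialises on debug->lock internally,
so it is safe from both process and irq context. A hedged usage sketch
(HYPOTHETICAL_REG and val are placeholders, not real driver symbols):

        intel_uncore_write(uncore, HYPOTHETICAL_REG, val);
        if (intel_uncore_unclaimed_mmio(uncore))
                DRM_DEBUG("write left an unclaimed access pending\n");
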
bool
@@ -1849,84 +1997,28 @@ intel_uncore_arm_unclaimed_mmio_detection(struct intel_uncore *uncore)
{
bool ret = false;
- spin_lock_irq(&uncore->lock);
+ spin_lock_irq(&uncore->debug->lock);
- if (unlikely(uncore->unclaimed_mmio_check <= 0))
+ if (unlikely(uncore->debug->unclaimed_mmio_check <= 0))
goto out;
- if (unlikely(intel_uncore_unclaimed_mmio(uncore))) {
+ if (unlikely(check_for_unclaimed_mmio(uncore))) {
if (!i915_modparams.mmio_debug) {
DRM_DEBUG("Unclaimed register detected, "
"enabling oneshot unclaimed register reporting. "
"Please use i915.mmio_debug=N for more information.\n");
i915_modparams.mmio_debug++;
}
- uncore->unclaimed_mmio_check--;
+ uncore->debug->unclaimed_mmio_check--;
ret = true;
}
out:
- spin_unlock_irq(&uncore->lock);
+ spin_unlock_irq(&uncore->debug->lock);
return ret;
}
-static enum forcewake_domains
-intel_uncore_forcewake_for_read(struct intel_uncore *uncore,
- i915_reg_t reg)
-{
- struct drm_i915_private *i915 = uncore_to_i915(uncore);
- u32 offset = i915_mmio_reg_offset(reg);
- enum forcewake_domains fw_domains;
-
- if (INTEL_GEN(i915) >= 11) {
- fw_domains = __gen11_fwtable_reg_read_fw_domains(uncore, offset);
- } else if (HAS_FWTABLE(i915)) {
- fw_domains = __fwtable_reg_read_fw_domains(uncore, offset);
- } else if (INTEL_GEN(i915) >= 6) {
- fw_domains = __gen6_reg_read_fw_domains(uncore, offset);
- } else {
- /* on devices with FW we expect to hit one of the above cases */
- if (intel_uncore_has_forcewake(uncore))
- MISSING_CASE(INTEL_GEN(i915));
-
- fw_domains = 0;
- }
-
- WARN_ON(fw_domains & ~uncore->fw_domains);
-
- return fw_domains;
-}
-
-static enum forcewake_domains
-intel_uncore_forcewake_for_write(struct intel_uncore *uncore,
- i915_reg_t reg)
-{
- struct drm_i915_private *i915 = uncore_to_i915(uncore);
- u32 offset = i915_mmio_reg_offset(reg);
- enum forcewake_domains fw_domains;
-
- if (INTEL_GEN(i915) >= 11) {
- fw_domains = __gen11_fwtable_reg_write_fw_domains(uncore, offset);
- } else if (HAS_FWTABLE(i915) && !IS_VALLEYVIEW(i915)) {
- fw_domains = __fwtable_reg_write_fw_domains(uncore, offset);
- } else if (IS_GEN(i915, 8)) {
- fw_domains = __gen8_reg_write_fw_domains(uncore, offset);
- } else if (IS_GEN_RANGE(i915, 6, 7)) {
- fw_domains = FORCEWAKE_RENDER;
- } else {
- /* on devices with FW we expect to hit one of the above cases */
- if (intel_uncore_has_forcewake(uncore))
- MISSING_CASE(INTEL_GEN(i915));
-
- fw_domains = 0;
- }
-
- WARN_ON(fw_domains & ~uncore->fw_domains);
-
- return fw_domains;
-}
-
/**
* intel_uncore_forcewake_for_reg - which forcewake domains are needed to access
* a register
@@ -1953,10 +2045,12 @@ intel_uncore_forcewake_for_reg(struct intel_uncore *uncore,
return 0;
if (op & FW_REG_READ)
- fw_domains = intel_uncore_forcewake_for_read(uncore, reg);
+ fw_domains = uncore->funcs.read_fw_domains(uncore, reg);
if (op & FW_REG_WRITE)
- fw_domains |= intel_uncore_forcewake_for_write(uncore, reg);
+ fw_domains |= uncore->funcs.write_fw_domains(uncore, reg);
+
+ WARN_ON(fw_domains & ~uncore->fw_domains);
return fw_domains;
}
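
With the domain lookups routed through uncore->funcs, the caller-facing
pattern is unchanged. A sketch under the assumption of a hypothetical
register THE_REG; the surrounding calls are the existing uncore API:

        enum forcewake_domains fw;

        fw = intel_uncore_forcewake_for_reg(uncore, THE_REG,
                                            FW_REG_READ | FW_REG_WRITE);

        spin_lock_irq(&uncore->lock);
        intel_uncore_forcewake_get__locked(uncore, fw);

        intel_uncore_write_fw(uncore, THE_REG, val);    /* no implicit fw */

        intel_uncore_forcewake_put__locked(uncore, fw);
        spin_unlock_irq(&uncore->lock);
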
diff --git a/drivers/gpu/drm/i915/intel_uncore.h b/drivers/gpu/drm/i915/intel_uncore.h
index 804a0faacc91..414fc2cb0459 100644
--- a/drivers/gpu/drm/i915/intel_uncore.h
+++ b/drivers/gpu/drm/i915/intel_uncore.h
@@ -36,6 +36,13 @@ struct drm_i915_private;
struct intel_runtime_pm;
struct intel_uncore;
+struct intel_uncore_mmio_debug {
+ spinlock_t lock; /** lock is also taken in irq contexts. */
+ int unclaimed_mmio_check;
+ int saved_mmio_check;
+ u32 suspend_count;
+};
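
Splitting the debug state into its own structure lets multiple uncore
instances share one unclaimed-access detector. A minimal init sketch,
assuming the one-shot default used elsewhere in the driver (the
function name is illustrative):

static void example_mmio_debug_init(struct intel_uncore_mmio_debug *d)
{
        spin_lock_init(&d->lock);
        d->unclaimed_mmio_check = 1;    /* arm the one-shot detector */
}
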
+
enum forcewake_domain_id {
FW_DOMAIN_ID_RENDER = 0,
FW_DOMAIN_ID_BLITTER,
@@ -70,6 +77,11 @@ struct intel_uncore_funcs {
void (*force_wake_put)(struct intel_uncore *uncore,
enum forcewake_domains domains);
+ enum forcewake_domains (*read_fw_domains)(struct intel_uncore *uncore,
+ i915_reg_t r);
+ enum forcewake_domains (*write_fw_domains)(struct intel_uncore *uncore,
+ i915_reg_t r);
+
u8 (*mmio_readb)(struct intel_uncore *uncore,
i915_reg_t r, bool trace);
u16 (*mmio_readw)(struct intel_uncore *uncore,
@@ -97,6 +109,7 @@ struct intel_forcewake_range {
struct intel_uncore {
void __iomem *regs;
+ struct drm_i915_private *i915;
struct intel_runtime_pm *rpm;
spinlock_t lock; /** lock is also taken in irq contexts. */
@@ -117,9 +130,11 @@ struct intel_uncore {
enum forcewake_domains fw_domains;
enum forcewake_domains fw_domains_active;
+ enum forcewake_domains fw_domains_timer;
enum forcewake_domains fw_domains_saved; /* user domains saved for S3 */
struct intel_uncore_forcewake_domain {
+ struct intel_uncore *uncore;
enum forcewake_domain_id id;
enum forcewake_domains mask;
unsigned int wake_count;
@@ -127,32 +142,21 @@ struct intel_uncore {
struct hrtimer timer;
u32 __iomem *reg_set;
u32 __iomem *reg_ack;
- } fw_domain[FW_DOMAIN_ID_COUNT];
-
- struct {
- unsigned int count;
+ } *fw_domain[FW_DOMAIN_ID_COUNT];
- int saved_mmio_check;
- int saved_mmio_debug;
- } user_forcewake;
+ unsigned int user_forcewake_count;
- int unclaimed_mmio_check;
+ struct intel_uncore_mmio_debug *debug;
};
/* Iterate over initialised fw domains */
#define for_each_fw_domain_masked(domain__, mask__, uncore__, tmp__) \
- for (tmp__ = (mask__); \
- tmp__ ? (domain__ = &(uncore__)->fw_domain[__mask_next_bit(tmp__)]), 1 : 0;)
+ for (tmp__ = (mask__); tmp__ ;) \
+ for_each_if(domain__ = (uncore__)->fw_domain[__mask_next_bit(tmp__)])
#define for_each_fw_domain(domain__, uncore__, tmp__) \
for_each_fw_domain_masked(domain__, (uncore__)->fw_domains, uncore__, tmp__)
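
The iterator now walks an array of pointers instead of embedded
structs, with for_each_if() guarding against unpopulated slots. An
illustrative walk over the initialised domains:

        struct intel_uncore_forcewake_domain *domain;
        unsigned int count = 0, tmp;

        for_each_fw_domain(domain, uncore, tmp)
                count++;        /* one iteration per initialised domain */
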
-static inline struct intel_uncore *
-forcewake_domain_to_uncore(const struct intel_uncore_forcewake_domain *d)
-{
- return container_of(d, struct intel_uncore, fw_domain[d->id]);
-}
-
static inline bool
intel_uncore_has_forcewake(const struct intel_uncore *uncore)
{
@@ -177,8 +181,10 @@ intel_uncore_has_fifo(const struct intel_uncore *uncore)
return uncore->flags & UNCORE_HAS_FIFO;
}
-void intel_uncore_sanitize(struct drm_i915_private *dev_priv);
-void intel_uncore_init_early(struct intel_uncore *uncore);
+void
+intel_uncore_mmio_debug_init_early(struct intel_uncore_mmio_debug *mmio_debug);
+void intel_uncore_init_early(struct intel_uncore *uncore,
+ struct drm_i915_private *i915);
int intel_uncore_init_mmio(struct intel_uncore *uncore);
void intel_uncore_prune_mmio_domains(struct intel_uncore *uncore);
bool intel_uncore_unclaimed_mmio(struct intel_uncore *uncore);
@@ -391,6 +397,18 @@ static inline void intel_uncore_rmw_fw(struct intel_uncore *uncore,
intel_uncore_write_fw(uncore, reg, val);
}
+static inline int intel_uncore_write_and_verify(struct intel_uncore *uncore,
+ i915_reg_t reg, u32 val,
+ u32 mask, u32 expected_val)
+{
+ u32 reg_val;
+
+ intel_uncore_write(uncore, reg, val);
+ reg_val = intel_uncore_read(uncore, reg);
+
+ return (reg_val & mask) != expected_val ? -EINVAL : 0;
+}
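
This helper replaces the file-local write_and_verify() removed from
intel_wopcm.c later in this patch; the locked bit is now folded into
@expected_val by the caller. A usage sketch mirroring the old WOPCM
size programming (guc_size is assumed to hold the value to latch):

        err = intel_uncore_write_and_verify(uncore, GUC_WOPCM_SIZE,
                                            guc_size,
                                            GUC_WOPCM_SIZE_MASK |
                                            GUC_WOPCM_SIZE_LOCKED,
                                            guc_size | GUC_WOPCM_SIZE_LOCKED);
        if (err)        /* hardware latched a different or unlocked value */
                return err;
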
+
#define raw_reg_read(base, reg) \
readl(base + i915_mmio_reg_offset(reg))
#define raw_reg_write(base, reg, value) \
diff --git a/drivers/gpu/drm/i915/intel_wakeref.c b/drivers/gpu/drm/i915/intel_wakeref.c
index 3db6fa682823..868cc78048d0 100644
--- a/drivers/gpu/drm/i915/intel_wakeref.c
+++ b/drivers/gpu/drm/i915/intel_wakeref.c
@@ -4,25 +4,25 @@
* Copyright © 2019 Intel Corporation
*/
+#include <linux/wait_bit.h>
+
#include "intel_runtime_pm.h"
-#include "i915_gem.h"
+#include "intel_wakeref.h"
-static void rpm_get(struct intel_runtime_pm *rpm, struct intel_wakeref *wf)
+static void rpm_get(struct intel_wakeref *wf)
{
- wf->wakeref = intel_runtime_pm_get(rpm);
+ wf->wakeref = intel_runtime_pm_get(wf->rpm);
}
-static void rpm_put(struct intel_runtime_pm *rpm, struct intel_wakeref *wf)
+static void rpm_put(struct intel_wakeref *wf)
{
intel_wakeref_t wakeref = fetch_and_zero(&wf->wakeref);
- intel_runtime_pm_put(rpm, wakeref);
- GEM_BUG_ON(!wakeref);
+ intel_runtime_pm_put(wf->rpm, wakeref);
+ INTEL_WAKEREF_BUG_ON(!wakeref);
}
-int __intel_wakeref_get_first(struct intel_runtime_pm *rpm,
- struct intel_wakeref *wf,
- int (*fn)(struct intel_wakeref *wf))
+int __intel_wakeref_get_first(struct intel_wakeref *wf)
{
/*
* Treat get/put as different subclasses, as we may need to run
@@ -34,11 +34,11 @@ int __intel_wakeref_get_first(struct intel_runtime_pm *rpm,
if (!atomic_read(&wf->count)) {
int err;
- rpm_get(rpm, wf);
+ rpm_get(wf);
- err = fn(wf);
+ err = wf->ops->get(wf);
if (unlikely(err)) {
- rpm_put(rpm, wf);
+ rpm_put(wf);
mutex_unlock(&wf->mutex);
return err;
}
@@ -48,30 +48,69 @@ int __intel_wakeref_get_first(struct intel_runtime_pm *rpm,
atomic_inc(&wf->count);
mutex_unlock(&wf->mutex);
+ INTEL_WAKEREF_BUG_ON(atomic_read(&wf->count) <= 0);
return 0;
}
-int __intel_wakeref_put_last(struct intel_runtime_pm *rpm,
- struct intel_wakeref *wf,
- int (*fn)(struct intel_wakeref *wf))
+static void ____intel_wakeref_put_last(struct intel_wakeref *wf)
{
- int err;
+ if (!atomic_dec_and_test(&wf->count))
+ goto unlock;
+
+ /* ops->put() must reschedule its own release on error/deferral */
+ if (likely(!wf->ops->put(wf))) {
+ rpm_put(wf);
+ wake_up_var(&wf->wakeref);
+ }
- err = fn(wf);
- if (likely(!err))
- rpm_put(rpm, wf);
- else
- atomic_inc(&wf->count);
+unlock:
mutex_unlock(&wf->mutex);
+}
+
+void __intel_wakeref_put_last(struct intel_wakeref *wf)
+{
+ INTEL_WAKEREF_BUG_ON(work_pending(&wf->work));
+
+ /* Assume we are not in process context and so cannot sleep. */
+ if (wf->ops->flags & INTEL_WAKEREF_PUT_ASYNC ||
+ !mutex_trylock(&wf->mutex)) {
+ schedule_work(&wf->work);
+ return;
+ }
+
+ ____intel_wakeref_put_last(wf);
+}
+
+static void __intel_wakeref_put_work(struct work_struct *wrk)
+{
+ struct intel_wakeref *wf = container_of(wrk, typeof(*wf), work);
- return err;
+ if (atomic_add_unless(&wf->count, -1, 1))
+ return;
+
+ mutex_lock(&wf->mutex);
+ ____intel_wakeref_put_last(wf);
}
-void __intel_wakeref_init(struct intel_wakeref *wf, struct lock_class_key *key)
+void __intel_wakeref_init(struct intel_wakeref *wf,
+ struct intel_runtime_pm *rpm,
+ const struct intel_wakeref_ops *ops,
+ struct lock_class_key *key)
{
+ wf->rpm = rpm;
+ wf->ops = ops;
+
__mutex_init(&wf->mutex, "wakeref", key);
atomic_set(&wf->count, 0);
wf->wakeref = 0;
+
+ INIT_WORK(&wf->work, __intel_wakeref_put_work);
+}
+
+int intel_wakeref_wait_for_idle(struct intel_wakeref *wf)
+{
+ return wait_var_event_killable(&wf->wakeref,
+ !intel_wakeref_is_active(wf));
}
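
The rpm reference and the get/put callbacks now live with the wakeref
itself rather than being passed on every call. A hedged sketch of
wiring up an ops table (the gt/engine code provides the real
implementations; every name here is invented):

static int example_get(struct intel_wakeref *wf)
{
        return 0;       /* power up the guarded unit; 0 on success */
}

static int example_put(struct intel_wakeref *wf)
{
        return 0;       /* power down; non-zero defers the release */
}

static const struct intel_wakeref_ops example_ops = {
        .get = example_get,
        .put = example_put,
        /* add .flags = INTEL_WAKEREF_PUT_ASYNC to always release via worker */
};

static void example_init(struct intel_wakeref *wf,
                         struct intel_runtime_pm *rpm)
{
        intel_wakeref_init(wf, rpm, &example_ops);
}
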
static void wakeref_auto_timeout(struct timer_list *t)
@@ -115,7 +154,7 @@ void intel_wakeref_auto(struct intel_wakeref_auto *wf, unsigned long timeout)
if (!refcount_inc_not_zero(&wf->count)) {
spin_lock_irqsave(&wf->lock, flags);
if (!refcount_inc_not_zero(&wf->count)) {
- GEM_BUG_ON(wf->wakeref);
+ INTEL_WAKEREF_BUG_ON(wf->wakeref);
wf->wakeref = intel_runtime_pm_get_if_in_use(wf->rpm);
refcount_set(&wf->count, 1);
}
@@ -134,5 +173,5 @@ void intel_wakeref_auto(struct intel_wakeref_auto *wf, unsigned long timeout)
void intel_wakeref_auto_fini(struct intel_wakeref_auto *wf)
{
intel_wakeref_auto(wf, 0);
- GEM_BUG_ON(wf->wakeref);
+ INTEL_WAKEREF_BUG_ON(wf->wakeref);
}
diff --git a/drivers/gpu/drm/i915/intel_wakeref.h b/drivers/gpu/drm/i915/intel_wakeref.h
index 38275310b196..5f0c972a80fb 100644
--- a/drivers/gpu/drm/i915/intel_wakeref.h
+++ b/drivers/gpu/drm/i915/intel_wakeref.h
@@ -8,35 +8,56 @@
#define INTEL_WAKEREF_H
#include <linux/atomic.h>
+#include <linux/bits.h>
#include <linux/mutex.h>
#include <linux/refcount.h>
#include <linux/stackdepot.h>
#include <linux/timer.h>
+#include <linux/workqueue.h>
+
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG)
+#define INTEL_WAKEREF_BUG_ON(expr) BUG_ON(expr)
+#else
+#define INTEL_WAKEREF_BUG_ON(expr) BUILD_BUG_ON_INVALID(expr)
+#endif
struct intel_runtime_pm;
+struct intel_wakeref;
typedef depot_stack_handle_t intel_wakeref_t;
+struct intel_wakeref_ops {
+ int (*get)(struct intel_wakeref *wf);
+ int (*put)(struct intel_wakeref *wf);
+
+ unsigned long flags;
+#define INTEL_WAKEREF_PUT_ASYNC BIT(0)
+};
+
struct intel_wakeref {
atomic_t count;
struct mutex mutex;
+
intel_wakeref_t wakeref;
+
+ struct intel_runtime_pm *rpm;
+ const struct intel_wakeref_ops *ops;
+
+ struct work_struct work;
};
void __intel_wakeref_init(struct intel_wakeref *wf,
+ struct intel_runtime_pm *rpm,
+ const struct intel_wakeref_ops *ops,
struct lock_class_key *key);
-#define intel_wakeref_init(wf) do { \
+#define intel_wakeref_init(wf, rpm, ops) do { \
static struct lock_class_key __key; \
\
- __intel_wakeref_init((wf), &__key); \
+ __intel_wakeref_init((wf), (rpm), (ops), &__key); \
} while (0)
-int __intel_wakeref_get_first(struct intel_runtime_pm *rpm,
- struct intel_wakeref *wf,
- int (*fn)(struct intel_wakeref *wf));
-int __intel_wakeref_put_last(struct intel_runtime_pm *rpm,
- struct intel_wakeref *wf,
- int (*fn)(struct intel_wakeref *wf));
+int __intel_wakeref_get_first(struct intel_wakeref *wf);
+void __intel_wakeref_put_last(struct intel_wakeref *wf);
/**
* intel_wakeref_get: Acquire the wakeref
@@ -55,12 +76,10 @@ int __intel_wakeref_put_last(struct intel_runtime_pm *rpm,
* code otherwise.
*/
static inline int
-intel_wakeref_get(struct intel_runtime_pm *rpm,
- struct intel_wakeref *wf,
- int (*fn)(struct intel_wakeref *wf))
+intel_wakeref_get(struct intel_wakeref *wf)
{
if (unlikely(!atomic_inc_not_zero(&wf->count)))
- return __intel_wakeref_get_first(rpm, wf, fn);
+ return __intel_wakeref_get_first(wf);
return 0;
}
@@ -96,15 +115,12 @@ intel_wakeref_get_if_active(struct intel_wakeref *wf)
* Returns: 0 if the wakeref was released successfully, or a negative error
* code otherwise.
*/
-static inline int
-intel_wakeref_put(struct intel_runtime_pm *rpm,
- struct intel_wakeref *wf,
- int (*fn)(struct intel_wakeref *wf))
+static inline void
+intel_wakeref_put(struct intel_wakeref *wf)
{
- if (atomic_dec_and_mutex_lock(&wf->count, &wf->mutex))
- return __intel_wakeref_put_last(rpm, wf, fn);
-
- return 0;
+ INTEL_WAKEREF_BUG_ON(atomic_read(&wf->count) <= 0);
+ if (unlikely(!atomic_add_unless(&wf->count, -1, 1)))
+ __intel_wakeref_put_last(wf);
}
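
Both fast paths are now atomic: intel_wakeref_get() takes the mutex
only for the 0->1 transition and intel_wakeref_put() only for 1->0,
with the final release possibly deferred to the worker. Usage sketch:

        err = intel_wakeref_get(wf);
        if (err)
                return err;     /* ops->get() failed; wakeref not held */

        /* ... do work that needs the hardware awake ... */

        intel_wakeref_put(wf);  /* last put may punt to wf->work */
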
/**
@@ -136,17 +152,41 @@ intel_wakeref_unlock(struct intel_wakeref *wf)
}
/**
- * intel_wakeref_active: Query whether the wakeref is currently held
+ * intel_wakeref_is_active: Query whether the wakeref is currently held
* @wf: the wakeref
*
* Returns: true if the wakeref is currently held.
*/
static inline bool
-intel_wakeref_active(struct intel_wakeref *wf)
+intel_wakeref_is_active(const struct intel_wakeref *wf)
{
return READ_ONCE(wf->wakeref);
}
+/**
+ * __intel_wakeref_defer_park: Defer the current park callback
+ * @wf: the wakeref
+ */
+static inline void
+__intel_wakeref_defer_park(struct intel_wakeref *wf)
+{
+ INTEL_WAKEREF_BUG_ON(atomic_read(&wf->count));
+ atomic_set_release(&wf->count, 1);
+}
+
+/**
+ * intel_wakeref_wait_for_idle: Wait until the wakeref is idle
+ * @wf: the wakeref
+ *
+ * Wait for the earlier asynchronous release of the wakeref. Note that
+ * this will also wait for any third-party holder, so only wait when you
+ * control the wakeref and trust that no one else is acquiring it.
+ *
+ * Return: 0 on success, error code if killed.
+ */
+int intel_wakeref_wait_for_idle(struct intel_wakeref *wf);
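
A teardown sketch combining the asynchronous put with the new wait,
assuming the caller owns the wakeref as the comment above requires:

        intel_wakeref_put(wf);                  /* release may be deferred */

        err = intel_wakeref_wait_for_idle(wf);  /* flush the async release */
        if (err)
                return err;     /* interrupted by a fatal signal */
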
+
struct intel_wakeref_auto {
struct intel_runtime_pm *rpm;
struct timer_list timer;
diff --git a/drivers/gpu/drm/i915/intel_wopcm.c b/drivers/gpu/drm/i915/intel_wopcm.c
index 7b4ba84b9fb8..2bb9f9f9a50a 100644
--- a/drivers/gpu/drm/i915/intel_wopcm.c
+++ b/drivers/gpu/drm/i915/intel_wopcm.c
@@ -1,7 +1,6 @@
+// SPDX-License-Identifier: MIT
/*
- * SPDX-License-Identifier: MIT
- *
- * Copyright © 2017-2018 Intel Corporation
+ * Copyright © 2017-2019 Intel Corporation
*/
#include "intel_wopcm.h"
@@ -64,6 +63,11 @@
#define GEN9_GUC_FW_RESERVED SZ_128K
#define GEN9_GUC_WOPCM_OFFSET (GUC_WOPCM_RESERVED + GEN9_GUC_FW_RESERVED)
+static inline struct drm_i915_private *wopcm_to_i915(struct intel_wopcm *wopcm)
+{
+ return container_of(wopcm, struct drm_i915_private, wopcm);
+}
+
/**
* intel_wopcm_init_early() - Early initialization of the WOPCM.
* @wopcm: pointer to intel_wopcm.
@@ -74,7 +78,7 @@ void intel_wopcm_init_early(struct intel_wopcm *wopcm)
{
struct drm_i915_private *i915 = wopcm_to_i915(wopcm);
- if (!HAS_GUC(i915))
+ if (!HAS_GT_UC(i915))
return;
if (INTEL_GEN(i915) >= 11)
@@ -82,7 +86,7 @@ void intel_wopcm_init_early(struct intel_wopcm *wopcm)
else
wopcm->size = GEN9_WOPCM_SIZE;
- DRM_DEBUG_DRIVER("WOPCM size: %uKiB\n", wopcm->size / 1024);
+ DRM_DEV_DEBUG_DRIVER(i915->drm.dev, "WOPCM: %uK\n", wopcm->size / 1024);
}
static inline u32 context_reserved_size(struct drm_i915_private *i915)
@@ -95,7 +99,8 @@ static inline u32 context_reserved_size(struct drm_i915_private *i915)
return 0;
}
-static inline int gen9_check_dword_gap(u32 guc_wopcm_base, u32 guc_wopcm_size)
+static inline bool gen9_check_dword_gap(struct drm_i915_private *i915,
+ u32 guc_wopcm_base, u32 guc_wopcm_size)
{
u32 offset;
@@ -107,16 +112,18 @@ static inline int gen9_check_dword_gap(u32 guc_wopcm_base, u32 guc_wopcm_size)
offset = guc_wopcm_base + GEN9_GUC_WOPCM_OFFSET;
if (offset > guc_wopcm_size ||
(guc_wopcm_size - offset) < sizeof(u32)) {
- DRM_ERROR("GuC WOPCM size %uKiB is too small. %uKiB needed.\n",
- guc_wopcm_size / 1024,
- (u32)(offset + sizeof(u32)) / 1024);
- return -E2BIG;
+ dev_err(i915->drm.dev,
+ "WOPCM: invalid GuC region size: %uK < %uK\n",
+ guc_wopcm_size / SZ_1K,
+ (u32)(offset + sizeof(u32)) / SZ_1K);
+ return false;
}
- return 0;
+ return true;
}
-static inline int gen9_check_huc_fw_fits(u32 guc_wopcm_size, u32 huc_fw_size)
+static inline bool gen9_check_huc_fw_fits(struct drm_i915_private *i915,
+ u32 guc_wopcm_size, u32 huc_fw_size)
{
/*
* On Gen9 & CNL A0, hardware requires the total available GuC WOPCM
@@ -124,29 +131,81 @@ static inline int gen9_check_huc_fw_fits(u32 guc_wopcm_size, u32 huc_fw_size)
* firmware uploading would fail.
*/
if (huc_fw_size > guc_wopcm_size - GUC_WOPCM_RESERVED) {
- DRM_ERROR("HuC FW (%uKiB) won't fit in GuC WOPCM (%uKiB).\n",
- huc_fw_size / 1024,
- (guc_wopcm_size - GUC_WOPCM_RESERVED) / 1024);
- return -E2BIG;
+ dev_err(i915->drm.dev, "WOPCM: no space for %s: %uK < %uK\n",
+ intel_uc_fw_type_repr(INTEL_UC_FW_TYPE_HUC),
+ (guc_wopcm_size - GUC_WOPCM_RESERVED) / SZ_1K,
+ huc_fw_size / SZ_1K);
+ return false;
}
- return 0;
+ return true;
+}
+
+static inline bool check_hw_restrictions(struct drm_i915_private *i915,
+ u32 guc_wopcm_base, u32 guc_wopcm_size,
+ u32 huc_fw_size)
+{
+ if (IS_GEN(i915, 9) && !gen9_check_dword_gap(i915, guc_wopcm_base,
+ guc_wopcm_size))
+ return false;
+
+ if ((IS_GEN(i915, 9) ||
+ IS_CNL_REVID(i915, CNL_REVID_A0, CNL_REVID_A0)) &&
+ !gen9_check_huc_fw_fits(i915, guc_wopcm_size, huc_fw_size))
+ return false;
+
+ return true;
}
-static inline int check_hw_restriction(struct drm_i915_private *i915,
- u32 guc_wopcm_base, u32 guc_wopcm_size,
- u32 huc_fw_size)
+static inline bool __check_layout(struct drm_i915_private *i915, u32 wopcm_size,
+ u32 guc_wopcm_base, u32 guc_wopcm_size,
+ u32 guc_fw_size, u32 huc_fw_size)
{
- int err = 0;
+ const u32 ctx_rsvd = context_reserved_size(i915);
+ u32 size;
+
+ size = wopcm_size - ctx_rsvd;
+ if (unlikely(range_overflows(guc_wopcm_base, guc_wopcm_size, size))) {
+ dev_err(i915->drm.dev,
+ "WOPCM: invalid GuC region layout: %uK + %uK > %uK\n",
+ guc_wopcm_base / SZ_1K, guc_wopcm_size / SZ_1K,
+ size / SZ_1K);
+ return false;
+ }
+
+ size = guc_fw_size + GUC_WOPCM_RESERVED + GUC_WOPCM_STACK_RESERVED;
+ if (unlikely(guc_wopcm_size < size)) {
+ dev_err(i915->drm.dev, "WOPCM: no space for %s: %uK < %uK\n",
+ intel_uc_fw_type_repr(INTEL_UC_FW_TYPE_GUC),
+ guc_wopcm_size / SZ_1K, size / SZ_1K);
+ return false;
+ }
- if (IS_GEN(i915, 9))
- err = gen9_check_dword_gap(guc_wopcm_base, guc_wopcm_size);
+ size = huc_fw_size + WOPCM_RESERVED_SIZE;
+ if (unlikely(guc_wopcm_base < size)) {
+ dev_err(i915->drm.dev, "WOPCM: no space for %s: %uK < %uK\n",
+ intel_uc_fw_type_repr(INTEL_UC_FW_TYPE_HUC),
+ guc_wopcm_base / SZ_1K, size / SZ_1K);
+ return false;
+ }
+
+ return check_hw_restrictions(i915, guc_wopcm_base, guc_wopcm_size,
+ huc_fw_size);
+}
- if (!err &&
- (IS_GEN(i915, 9) || IS_CNL_REVID(i915, CNL_REVID_A0, CNL_REVID_A0)))
- err = gen9_check_huc_fw_fits(guc_wopcm_size, huc_fw_size);
+static bool __wopcm_regs_locked(struct intel_uncore *uncore,
+ u32 *guc_wopcm_base, u32 *guc_wopcm_size)
+{
+ u32 reg_base = intel_uncore_read(uncore, DMA_GUC_WOPCM_OFFSET);
+ u32 reg_size = intel_uncore_read(uncore, GUC_WOPCM_SIZE);
+
+ if (!(reg_size & GUC_WOPCM_SIZE_LOCKED) ||
+ !(reg_base & GUC_WOPCM_OFFSET_VALID))
+ return false;
- return err;
+ *guc_wopcm_base = reg_base & GUC_WOPCM_OFFSET_MASK;
+ *guc_wopcm_size = reg_size & GUC_WOPCM_SIZE_MASK;
+ return true;
}
/**
@@ -156,135 +215,66 @@ static inline int check_hw_restriction(struct drm_i915_private *i915,
* This function will partition WOPCM space based on GuC and HuC firmware sizes
* and will allocate max remaining for use by GuC. This function will also
* enforce platform dependent hardware restrictions on GuC WOPCM offset and
- * size. It will fail the WOPCM init if any of these checks were failed, so that
- * the following GuC firmware uploading would be aborted.
- *
- * Return: 0 on success, non-zero error code on failure.
+ * size. It will fail the WOPCM init if any of these checks fail, so that the
+ * following WOPCM register setup and GuC firmware uploading are aborted.
*/
-int intel_wopcm_init(struct intel_wopcm *wopcm)
+void intel_wopcm_init(struct intel_wopcm *wopcm)
{
struct drm_i915_private *i915 = wopcm_to_i915(wopcm);
- u32 guc_fw_size = intel_uc_fw_get_upload_size(&i915->guc.fw);
- u32 huc_fw_size = intel_uc_fw_get_upload_size(&i915->huc.fw);
+ struct intel_gt *gt = &i915->gt;
+ u32 guc_fw_size = intel_uc_fw_get_upload_size(&gt->uc.guc.fw);
+ u32 huc_fw_size = intel_uc_fw_get_upload_size(&gt->uc.huc.fw);
u32 ctx_rsvd = context_reserved_size(i915);
u32 guc_wopcm_base;
u32 guc_wopcm_size;
- u32 guc_wopcm_rsvd;
- int err;
- if (!USES_GUC(i915))
- return 0;
+ if (!guc_fw_size)
+ return;
GEM_BUG_ON(!wopcm->size);
+ GEM_BUG_ON(wopcm->guc.base);
+ GEM_BUG_ON(wopcm->guc.size);
+ GEM_BUG_ON(guc_fw_size >= wopcm->size);
+ GEM_BUG_ON(huc_fw_size >= wopcm->size);
+ GEM_BUG_ON(ctx_rsvd + WOPCM_RESERVED_SIZE >= wopcm->size);
- if (i915_inject_load_failure())
- return -E2BIG;
+ if (i915_inject_probe_failure(i915))
+ return;
- if (guc_fw_size >= wopcm->size) {
- DRM_ERROR("GuC FW (%uKiB) is too big to fit in WOPCM.",
- guc_fw_size / 1024);
- return -E2BIG;
+ if (__wopcm_regs_locked(gt->uncore, &guc_wopcm_base, &guc_wopcm_size)) {
+ DRM_DEV_DEBUG_DRIVER(i915->drm.dev,
+ "GuC WOPCM is already locked [%uK, %uK)\n",
+ guc_wopcm_base / SZ_1K,
+ guc_wopcm_size / SZ_1K);
+ goto check;
}
- if (huc_fw_size >= wopcm->size) {
- DRM_ERROR("HuC FW (%uKiB) is too big to fit in WOPCM.",
- huc_fw_size / 1024);
- return -E2BIG;
- }
+ /*
+ * The aligned value of guc_wopcm_base determines the WOPCM space
+ * available for the HuC firmware and the mandatory reserved area.
+ */
+ guc_wopcm_base = huc_fw_size + WOPCM_RESERVED_SIZE;
+ guc_wopcm_base = ALIGN(guc_wopcm_base, GUC_WOPCM_OFFSET_ALIGNMENT);
- guc_wopcm_base = ALIGN(huc_fw_size + WOPCM_RESERVED_SIZE,
- GUC_WOPCM_OFFSET_ALIGNMENT);
- if ((guc_wopcm_base + ctx_rsvd) >= wopcm->size) {
- DRM_ERROR("GuC WOPCM base (%uKiB) is too big.\n",
- guc_wopcm_base / 1024);
- return -E2BIG;
- }
+ /*
+ * Need to clamp guc_wopcm_base now to make sure the following math is
+ * correct. Formal check of whole WOPCM layout will be done below.
+ */
+ guc_wopcm_base = min(guc_wopcm_base, wopcm->size - ctx_rsvd);
- guc_wopcm_size = wopcm->size - guc_wopcm_base - ctx_rsvd;
+ /* The aligned remainder of the usable WOPCM space can be assigned to GuC. */
+ guc_wopcm_size = wopcm->size - ctx_rsvd - guc_wopcm_base;
guc_wopcm_size &= GUC_WOPCM_SIZE_MASK;
- DRM_DEBUG_DRIVER("Calculated GuC WOPCM Region: [%uKiB, %uKiB)\n",
- guc_wopcm_base / 1024, guc_wopcm_size / 1024);
+ DRM_DEV_DEBUG_DRIVER(i915->drm.dev, "Calculated GuC WOPCM [%uK, %uK)\n",
+ guc_wopcm_base / SZ_1K, guc_wopcm_size / SZ_1K);
- guc_wopcm_rsvd = GUC_WOPCM_RESERVED + GUC_WOPCM_STACK_RESERVED;
- if ((guc_fw_size + guc_wopcm_rsvd) > guc_wopcm_size) {
- DRM_ERROR("Need %uKiB WOPCM for GuC, %uKiB available.\n",
- (guc_fw_size + guc_wopcm_rsvd) / 1024,
- guc_wopcm_size / 1024);
- return -E2BIG;
+check:
+ if (__check_layout(i915, wopcm->size, guc_wopcm_base, guc_wopcm_size,
+ guc_fw_size, huc_fw_size)) {
+ wopcm->guc.base = guc_wopcm_base;
+ wopcm->guc.size = guc_wopcm_size;
+ GEM_BUG_ON(!wopcm->guc.base);
+ GEM_BUG_ON(!wopcm->guc.size);
}
-
- err = check_hw_restriction(i915, guc_wopcm_base, guc_wopcm_size,
- huc_fw_size);
- if (err)
- return err;
-
- wopcm->guc.base = guc_wopcm_base;
- wopcm->guc.size = guc_wopcm_size;
-
- return 0;
-}
-
-static inline int write_and_verify(struct drm_i915_private *dev_priv,
- i915_reg_t reg, u32 val, u32 mask,
- u32 locked_bit)
-{
- u32 reg_val;
-
- GEM_BUG_ON(val & ~mask);
-
- I915_WRITE(reg, val);
-
- reg_val = I915_READ(reg);
-
- return (reg_val & mask) != (val | locked_bit) ? -EIO : 0;
-}
-
-/**
- * intel_wopcm_init_hw() - Setup GuC WOPCM registers.
- * @wopcm: pointer to intel_wopcm.
- *
- * Setup the GuC WOPCM size and offset registers with the calculated values. It
- * will verify the register values to make sure the registers are locked with
- * correct values.
- *
- * Return: 0 on success. -EIO if registers were locked with incorrect values.
- */
-int intel_wopcm_init_hw(struct intel_wopcm *wopcm)
-{
- struct drm_i915_private *dev_priv = wopcm_to_i915(wopcm);
- u32 huc_agent;
- u32 mask;
- int err;
-
- if (!USES_GUC(dev_priv))
- return 0;
-
- GEM_BUG_ON(!HAS_GUC(dev_priv));
- GEM_BUG_ON(!wopcm->guc.size);
- GEM_BUG_ON(!wopcm->guc.base);
-
- err = write_and_verify(dev_priv, GUC_WOPCM_SIZE, wopcm->guc.size,
- GUC_WOPCM_SIZE_MASK | GUC_WOPCM_SIZE_LOCKED,
- GUC_WOPCM_SIZE_LOCKED);
- if (err)
- goto err_out;
-
- huc_agent = USES_HUC(dev_priv) ? HUC_LOADING_AGENT_GUC : 0;
- mask = GUC_WOPCM_OFFSET_MASK | GUC_WOPCM_OFFSET_VALID | huc_agent;
- err = write_and_verify(dev_priv, DMA_GUC_WOPCM_OFFSET,
- wopcm->guc.base | huc_agent, mask,
- GUC_WOPCM_OFFSET_VALID);
- if (err)
- goto err_out;
-
- return 0;
-
-err_out:
- DRM_ERROR("Failed to init WOPCM registers:\n");
- DRM_ERROR("DMA_GUC_WOPCM_OFFSET=%#x\n",
- I915_READ(DMA_GUC_WOPCM_OFFSET));
- DRM_ERROR("GUC_WOPCM_SIZE=%#x\n", I915_READ(GUC_WOPCM_SIZE));
-
- return err;
}
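
A worked example of the partition math above, with illustrative
numbers (Gen9: 1M WOPCM, 16K alignment and reserved size, no context
reservation):

/*
 *   huc_fw_size    = 200K
 *   guc_wopcm_base = ALIGN(200K + WOPCM_RESERVED_SIZE(16K), 16K) = 224K
 *   guc_wopcm_size = (1024K - 0 - 224K) & GUC_WOPCM_SIZE_MASK    = 800K
 *
 * __check_layout() then confirms that the GuC firmware plus its
 * reserved and stack areas fit in the 800K region, and that the HuC
 * firmware fits below the 224K base.
 */
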
diff --git a/drivers/gpu/drm/i915/intel_wopcm.h b/drivers/gpu/drm/i915/intel_wopcm.h
index 114401971520..17d6aa86008a 100644
--- a/drivers/gpu/drm/i915/intel_wopcm.h
+++ b/drivers/gpu/drm/i915/intel_wopcm.h
@@ -25,6 +25,21 @@ struct intel_wopcm {
};
/**
+ * intel_wopcm_guc_base()
+ * @wopcm: intel_wopcm structure
+ *
+ * Returns the base of the WOPCM shadowed region.
+ *
+ * Returns:
+ * 0 if GuC is not present or not in use.
+ * Otherwise, the GuC WOPCM base.
+ */
+static inline u32 intel_wopcm_guc_base(struct intel_wopcm *wopcm)
+{
+ return wopcm->guc.base;
+}
+
+/**
* intel_wopcm_guc_size()
* @wopcm: intel_wopcm structure
*
@@ -40,7 +55,6 @@ static inline u32 intel_wopcm_guc_size(struct intel_wopcm *wopcm)
}
void intel_wopcm_init_early(struct intel_wopcm *wopcm);
-int intel_wopcm_init(struct intel_wopcm *wopcm);
-int intel_wopcm_init_hw(struct intel_wopcm *wopcm);
+void intel_wopcm_init(struct intel_wopcm *wopcm);
#endif
diff --git a/drivers/gpu/drm/i915/oa/Makefile b/drivers/gpu/drm/i915/oa/Makefile
new file mode 100644
index 000000000000..df028e2b0d64
--- /dev/null
+++ b/drivers/gpu/drm/i915/oa/Makefile
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: MIT
+
+# For building individual subdir files on the command line
+subdir-ccflags-y += -I$(srctree)/$(src)/..
+
+# Extra header tests
+header-test-pattern-$(CONFIG_DRM_I915_WERROR) := *.h
diff --git a/drivers/gpu/drm/i915/i915_oa_bdw.c b/drivers/gpu/drm/i915/oa/i915_oa_bdw.c
index 4acdb94555b7..14da5c3b569d 100644
--- a/drivers/gpu/drm/i915/i915_oa_bdw.c
+++ b/drivers/gpu/drm/i915/oa/i915_oa_bdw.c
@@ -1,7 +1,6 @@
+// SPDX-License-Identifier: MIT
/*
- * SPDX-License-Identifier: MIT
- *
- * Copyright © 2018 Intel Corporation
+ * Copyright © 2018-2019 Intel Corporation
*
* Autogenerated file by GPU Top : https://github.com/rib/gputop
* DO NOT EDIT manually!
@@ -66,26 +65,26 @@ show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf)
void
i915_perf_load_test_config_bdw(struct drm_i915_private *dev_priv)
{
- strlcpy(dev_priv->perf.oa.test_config.uuid,
+ strlcpy(dev_priv->perf.test_config.uuid,
"d6de6f55-e526-4f79-a6a6-d7315c09044e",
- sizeof(dev_priv->perf.oa.test_config.uuid));
- dev_priv->perf.oa.test_config.id = 1;
+ sizeof(dev_priv->perf.test_config.uuid));
+ dev_priv->perf.test_config.id = 1;
- dev_priv->perf.oa.test_config.mux_regs = mux_config_test_oa;
- dev_priv->perf.oa.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa);
+ dev_priv->perf.test_config.mux_regs = mux_config_test_oa;
+ dev_priv->perf.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa);
- dev_priv->perf.oa.test_config.b_counter_regs = b_counter_config_test_oa;
- dev_priv->perf.oa.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa);
+ dev_priv->perf.test_config.b_counter_regs = b_counter_config_test_oa;
+ dev_priv->perf.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa);
- dev_priv->perf.oa.test_config.flex_regs = flex_eu_config_test_oa;
- dev_priv->perf.oa.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa);
+ dev_priv->perf.test_config.flex_regs = flex_eu_config_test_oa;
+ dev_priv->perf.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa);
- dev_priv->perf.oa.test_config.sysfs_metric.name = "d6de6f55-e526-4f79-a6a6-d7315c09044e";
- dev_priv->perf.oa.test_config.sysfs_metric.attrs = dev_priv->perf.oa.test_config.attrs;
+ dev_priv->perf.test_config.sysfs_metric.name = "d6de6f55-e526-4f79-a6a6-d7315c09044e";
+ dev_priv->perf.test_config.sysfs_metric.attrs = dev_priv->perf.test_config.attrs;
- dev_priv->perf.oa.test_config.attrs[0] = &dev_priv->perf.oa.test_config.sysfs_metric_id.attr;
+ dev_priv->perf.test_config.attrs[0] = &dev_priv->perf.test_config.sysfs_metric_id.attr;
- dev_priv->perf.oa.test_config.sysfs_metric_id.attr.name = "id";
- dev_priv->perf.oa.test_config.sysfs_metric_id.attr.mode = 0444;
- dev_priv->perf.oa.test_config.sysfs_metric_id.show = show_test_oa_id;
+ dev_priv->perf.test_config.sysfs_metric_id.attr.name = "id";
+ dev_priv->perf.test_config.sysfs_metric_id.attr.mode = 0444;
+ dev_priv->perf.test_config.sysfs_metric_id.show = show_test_oa_id;
}
diff --git a/drivers/gpu/drm/i915/oa/i915_oa_bdw.h b/drivers/gpu/drm/i915/oa/i915_oa_bdw.h
new file mode 100644
index 000000000000..0cee3334f0a6
--- /dev/null
+++ b/drivers/gpu/drm/i915/oa/i915_oa_bdw.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2018-2019 Intel Corporation
+ *
+ * Autogenerated file by GPU Top : https://github.com/rib/gputop
+ * DO NOT EDIT manually!
+ */
+
+#ifndef __I915_OA_BDW_H__
+#define __I915_OA_BDW_H__
+
+struct drm_i915_private;
+
+void i915_perf_load_test_config_bdw(struct drm_i915_private *dev_priv);
+
+#endif
diff --git a/drivers/gpu/drm/i915/i915_oa_bxt.c b/drivers/gpu/drm/i915/oa/i915_oa_bxt.c
index a44195c39923..3e785bafcf99 100644
--- a/drivers/gpu/drm/i915/i915_oa_bxt.c
+++ b/drivers/gpu/drm/i915/oa/i915_oa_bxt.c
@@ -1,7 +1,6 @@
+// SPDX-License-Identifier: MIT
/*
- * SPDX-License-Identifier: MIT
- *
- * Copyright © 2018 Intel Corporation
+ * Copyright © 2018-2019 Intel Corporation
*
* Autogenerated file by GPU Top : https://github.com/rib/gputop
* DO NOT EDIT manually!
@@ -64,26 +63,26 @@ show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf)
void
i915_perf_load_test_config_bxt(struct drm_i915_private *dev_priv)
{
- strlcpy(dev_priv->perf.oa.test_config.uuid,
+ strlcpy(dev_priv->perf.test_config.uuid,
"5ee72f5c-092f-421e-8b70-225f7c3e9612",
- sizeof(dev_priv->perf.oa.test_config.uuid));
- dev_priv->perf.oa.test_config.id = 1;
+ sizeof(dev_priv->perf.test_config.uuid));
+ dev_priv->perf.test_config.id = 1;
- dev_priv->perf.oa.test_config.mux_regs = mux_config_test_oa;
- dev_priv->perf.oa.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa);
+ dev_priv->perf.test_config.mux_regs = mux_config_test_oa;
+ dev_priv->perf.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa);
- dev_priv->perf.oa.test_config.b_counter_regs = b_counter_config_test_oa;
- dev_priv->perf.oa.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa);
+ dev_priv->perf.test_config.b_counter_regs = b_counter_config_test_oa;
+ dev_priv->perf.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa);
- dev_priv->perf.oa.test_config.flex_regs = flex_eu_config_test_oa;
- dev_priv->perf.oa.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa);
+ dev_priv->perf.test_config.flex_regs = flex_eu_config_test_oa;
+ dev_priv->perf.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa);
- dev_priv->perf.oa.test_config.sysfs_metric.name = "5ee72f5c-092f-421e-8b70-225f7c3e9612";
- dev_priv->perf.oa.test_config.sysfs_metric.attrs = dev_priv->perf.oa.test_config.attrs;
+ dev_priv->perf.test_config.sysfs_metric.name = "5ee72f5c-092f-421e-8b70-225f7c3e9612";
+ dev_priv->perf.test_config.sysfs_metric.attrs = dev_priv->perf.test_config.attrs;
- dev_priv->perf.oa.test_config.attrs[0] = &dev_priv->perf.oa.test_config.sysfs_metric_id.attr;
+ dev_priv->perf.test_config.attrs[0] = &dev_priv->perf.test_config.sysfs_metric_id.attr;
- dev_priv->perf.oa.test_config.sysfs_metric_id.attr.name = "id";
- dev_priv->perf.oa.test_config.sysfs_metric_id.attr.mode = 0444;
- dev_priv->perf.oa.test_config.sysfs_metric_id.show = show_test_oa_id;
+ dev_priv->perf.test_config.sysfs_metric_id.attr.name = "id";
+ dev_priv->perf.test_config.sysfs_metric_id.attr.mode = 0444;
+ dev_priv->perf.test_config.sysfs_metric_id.show = show_test_oa_id;
}
diff --git a/drivers/gpu/drm/i915/oa/i915_oa_bxt.h b/drivers/gpu/drm/i915/oa/i915_oa_bxt.h
new file mode 100644
index 000000000000..0bdf391323ec
--- /dev/null
+++ b/drivers/gpu/drm/i915/oa/i915_oa_bxt.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2018-2019 Intel Corporation
+ *
+ * Autogenerated file by GPU Top : https://github.com/rib/gputop
+ * DO NOT EDIT manually!
+ */
+
+#ifndef __I915_OA_BXT_H__
+#define __I915_OA_BXT_H__
+
+struct drm_i915_private;
+
+void i915_perf_load_test_config_bxt(struct drm_i915_private *dev_priv);
+
+#endif
diff --git a/drivers/gpu/drm/i915/i915_oa_cflgt2.c b/drivers/gpu/drm/i915/oa/i915_oa_cflgt2.c
index 7f60d51b8761..0ea86f70a06c 100644
--- a/drivers/gpu/drm/i915/i915_oa_cflgt2.c
+++ b/drivers/gpu/drm/i915/oa/i915_oa_cflgt2.c
@@ -1,7 +1,6 @@
+// SPDX-License-Identifier: MIT
/*
- * SPDX-License-Identifier: MIT
- *
- * Copyright © 2018 Intel Corporation
+ * Copyright © 2018-2019 Intel Corporation
*
* Autogenerated file by GPU Top : https://github.com/rib/gputop
* DO NOT EDIT manually!
@@ -65,26 +64,26 @@ show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf)
void
i915_perf_load_test_config_cflgt2(struct drm_i915_private *dev_priv)
{
- strlcpy(dev_priv->perf.oa.test_config.uuid,
+ strlcpy(dev_priv->perf.test_config.uuid,
"74fb4902-d3d3-4237-9e90-cbdc68d0a446",
- sizeof(dev_priv->perf.oa.test_config.uuid));
- dev_priv->perf.oa.test_config.id = 1;
+ sizeof(dev_priv->perf.test_config.uuid));
+ dev_priv->perf.test_config.id = 1;
- dev_priv->perf.oa.test_config.mux_regs = mux_config_test_oa;
- dev_priv->perf.oa.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa);
+ dev_priv->perf.test_config.mux_regs = mux_config_test_oa;
+ dev_priv->perf.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa);
- dev_priv->perf.oa.test_config.b_counter_regs = b_counter_config_test_oa;
- dev_priv->perf.oa.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa);
+ dev_priv->perf.test_config.b_counter_regs = b_counter_config_test_oa;
+ dev_priv->perf.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa);
- dev_priv->perf.oa.test_config.flex_regs = flex_eu_config_test_oa;
- dev_priv->perf.oa.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa);
+ dev_priv->perf.test_config.flex_regs = flex_eu_config_test_oa;
+ dev_priv->perf.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa);
- dev_priv->perf.oa.test_config.sysfs_metric.name = "74fb4902-d3d3-4237-9e90-cbdc68d0a446";
- dev_priv->perf.oa.test_config.sysfs_metric.attrs = dev_priv->perf.oa.test_config.attrs;
+ dev_priv->perf.test_config.sysfs_metric.name = "74fb4902-d3d3-4237-9e90-cbdc68d0a446";
+ dev_priv->perf.test_config.sysfs_metric.attrs = dev_priv->perf.test_config.attrs;
- dev_priv->perf.oa.test_config.attrs[0] = &dev_priv->perf.oa.test_config.sysfs_metric_id.attr;
+ dev_priv->perf.test_config.attrs[0] = &dev_priv->perf.test_config.sysfs_metric_id.attr;
- dev_priv->perf.oa.test_config.sysfs_metric_id.attr.name = "id";
- dev_priv->perf.oa.test_config.sysfs_metric_id.attr.mode = 0444;
- dev_priv->perf.oa.test_config.sysfs_metric_id.show = show_test_oa_id;
+ dev_priv->perf.test_config.sysfs_metric_id.attr.name = "id";
+ dev_priv->perf.test_config.sysfs_metric_id.attr.mode = 0444;
+ dev_priv->perf.test_config.sysfs_metric_id.show = show_test_oa_id;
}
diff --git a/drivers/gpu/drm/i915/oa/i915_oa_cflgt2.h b/drivers/gpu/drm/i915/oa/i915_oa_cflgt2.h
new file mode 100644
index 000000000000..6b862280ab78
--- /dev/null
+++ b/drivers/gpu/drm/i915/oa/i915_oa_cflgt2.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2018-2019 Intel Corporation
+ *
+ * Autogenerated file by GPU Top : https://github.com/rib/gputop
+ * DO NOT EDIT manually!
+ */
+
+#ifndef __I915_OA_CFLGT2_H__
+#define __I915_OA_CFLGT2_H__
+
+struct drm_i915_private;
+
+void i915_perf_load_test_config_cflgt2(struct drm_i915_private *dev_priv);
+
+#endif
diff --git a/drivers/gpu/drm/i915/i915_oa_cflgt3.c b/drivers/gpu/drm/i915/oa/i915_oa_cflgt3.c
index a92c38e3a0ce..fc632dd890bf 100644
--- a/drivers/gpu/drm/i915/i915_oa_cflgt3.c
+++ b/drivers/gpu/drm/i915/oa/i915_oa_cflgt3.c
@@ -1,7 +1,6 @@
+// SPDX-License-Identifier: MIT
/*
- * SPDX-License-Identifier: MIT
- *
- * Copyright © 2018 Intel Corporation
+ * Copyright © 2018-2019 Intel Corporation
*
* Autogenerated file by GPU Top : https://github.com/rib/gputop
* DO NOT EDIT manually!
@@ -65,26 +64,26 @@ show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf)
void
i915_perf_load_test_config_cflgt3(struct drm_i915_private *dev_priv)
{
- strlcpy(dev_priv->perf.oa.test_config.uuid,
+ strlcpy(dev_priv->perf.test_config.uuid,
"577e8e2c-3fa0-4875-8743-3538d585e3b0",
- sizeof(dev_priv->perf.oa.test_config.uuid));
- dev_priv->perf.oa.test_config.id = 1;
+ sizeof(dev_priv->perf.test_config.uuid));
+ dev_priv->perf.test_config.id = 1;
- dev_priv->perf.oa.test_config.mux_regs = mux_config_test_oa;
- dev_priv->perf.oa.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa);
+ dev_priv->perf.test_config.mux_regs = mux_config_test_oa;
+ dev_priv->perf.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa);
- dev_priv->perf.oa.test_config.b_counter_regs = b_counter_config_test_oa;
- dev_priv->perf.oa.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa);
+ dev_priv->perf.test_config.b_counter_regs = b_counter_config_test_oa;
+ dev_priv->perf.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa);
- dev_priv->perf.oa.test_config.flex_regs = flex_eu_config_test_oa;
- dev_priv->perf.oa.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa);
+ dev_priv->perf.test_config.flex_regs = flex_eu_config_test_oa;
+ dev_priv->perf.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa);
- dev_priv->perf.oa.test_config.sysfs_metric.name = "577e8e2c-3fa0-4875-8743-3538d585e3b0";
- dev_priv->perf.oa.test_config.sysfs_metric.attrs = dev_priv->perf.oa.test_config.attrs;
+ dev_priv->perf.test_config.sysfs_metric.name = "577e8e2c-3fa0-4875-8743-3538d585e3b0";
+ dev_priv->perf.test_config.sysfs_metric.attrs = dev_priv->perf.test_config.attrs;
- dev_priv->perf.oa.test_config.attrs[0] = &dev_priv->perf.oa.test_config.sysfs_metric_id.attr;
+ dev_priv->perf.test_config.attrs[0] = &dev_priv->perf.test_config.sysfs_metric_id.attr;
- dev_priv->perf.oa.test_config.sysfs_metric_id.attr.name = "id";
- dev_priv->perf.oa.test_config.sysfs_metric_id.attr.mode = 0444;
- dev_priv->perf.oa.test_config.sysfs_metric_id.show = show_test_oa_id;
+ dev_priv->perf.test_config.sysfs_metric_id.attr.name = "id";
+ dev_priv->perf.test_config.sysfs_metric_id.attr.mode = 0444;
+ dev_priv->perf.test_config.sysfs_metric_id.show = show_test_oa_id;
}
diff --git a/drivers/gpu/drm/i915/oa/i915_oa_cflgt3.h b/drivers/gpu/drm/i915/oa/i915_oa_cflgt3.h
new file mode 100644
index 000000000000..4ca9d8f89b2f
--- /dev/null
+++ b/drivers/gpu/drm/i915/oa/i915_oa_cflgt3.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2018-2019 Intel Corporation
+ *
+ * Autogenerated file by GPU Top : https://github.com/rib/gputop
+ * DO NOT EDIT manually!
+ */
+
+#ifndef __I915_OA_CFLGT3_H__
+#define __I915_OA_CFLGT3_H__
+
+struct drm_i915_private;
+
+void i915_perf_load_test_config_cflgt3(struct drm_i915_private *dev_priv);
+
+#endif
diff --git a/drivers/gpu/drm/i915/i915_oa_chv.c b/drivers/gpu/drm/i915/oa/i915_oa_chv.c
index 71ec889a0114..6cd4e9921a8a 100644
--- a/drivers/gpu/drm/i915/i915_oa_chv.c
+++ b/drivers/gpu/drm/i915/oa/i915_oa_chv.c
@@ -1,7 +1,6 @@
+// SPDX-License-Identifier: MIT
/*
- * SPDX-License-Identifier: MIT
- *
- * Copyright © 2018 Intel Corporation
+ * Copyright © 2018-2019 Intel Corporation
*
* Autogenerated file by GPU Top : https://github.com/rib/gputop
* DO NOT EDIT manually!
@@ -65,26 +64,26 @@ show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf)
void
i915_perf_load_test_config_chv(struct drm_i915_private *dev_priv)
{
- strlcpy(dev_priv->perf.oa.test_config.uuid,
+ strlcpy(dev_priv->perf.test_config.uuid,
"4a534b07-cba3-414d-8d60-874830e883aa",
- sizeof(dev_priv->perf.oa.test_config.uuid));
- dev_priv->perf.oa.test_config.id = 1;
+ sizeof(dev_priv->perf.test_config.uuid));
+ dev_priv->perf.test_config.id = 1;
- dev_priv->perf.oa.test_config.mux_regs = mux_config_test_oa;
- dev_priv->perf.oa.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa);
+ dev_priv->perf.test_config.mux_regs = mux_config_test_oa;
+ dev_priv->perf.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa);
- dev_priv->perf.oa.test_config.b_counter_regs = b_counter_config_test_oa;
- dev_priv->perf.oa.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa);
+ dev_priv->perf.test_config.b_counter_regs = b_counter_config_test_oa;
+ dev_priv->perf.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa);
- dev_priv->perf.oa.test_config.flex_regs = flex_eu_config_test_oa;
- dev_priv->perf.oa.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa);
+ dev_priv->perf.test_config.flex_regs = flex_eu_config_test_oa;
+ dev_priv->perf.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa);
- dev_priv->perf.oa.test_config.sysfs_metric.name = "4a534b07-cba3-414d-8d60-874830e883aa";
- dev_priv->perf.oa.test_config.sysfs_metric.attrs = dev_priv->perf.oa.test_config.attrs;
+ dev_priv->perf.test_config.sysfs_metric.name = "4a534b07-cba3-414d-8d60-874830e883aa";
+ dev_priv->perf.test_config.sysfs_metric.attrs = dev_priv->perf.test_config.attrs;
- dev_priv->perf.oa.test_config.attrs[0] = &dev_priv->perf.oa.test_config.sysfs_metric_id.attr;
+ dev_priv->perf.test_config.attrs[0] = &dev_priv->perf.test_config.sysfs_metric_id.attr;
- dev_priv->perf.oa.test_config.sysfs_metric_id.attr.name = "id";
- dev_priv->perf.oa.test_config.sysfs_metric_id.attr.mode = 0444;
- dev_priv->perf.oa.test_config.sysfs_metric_id.show = show_test_oa_id;
+ dev_priv->perf.test_config.sysfs_metric_id.attr.name = "id";
+ dev_priv->perf.test_config.sysfs_metric_id.attr.mode = 0444;
+ dev_priv->perf.test_config.sysfs_metric_id.show = show_test_oa_id;
}
diff --git a/drivers/gpu/drm/i915/oa/i915_oa_chv.h b/drivers/gpu/drm/i915/oa/i915_oa_chv.h
new file mode 100644
index 000000000000..3cac7bbc9c71
--- /dev/null
+++ b/drivers/gpu/drm/i915/oa/i915_oa_chv.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2018-2019 Intel Corporation
+ *
+ * Autogenerated file by GPU Top : https://github.com/rib/gputop
+ * DO NOT EDIT manually!
+ */
+
+#ifndef __I915_OA_CHV_H__
+#define __I915_OA_CHV_H__
+
+struct drm_i915_private;
+
+void i915_perf_load_test_config_chv(struct drm_i915_private *dev_priv);
+
+#endif
diff --git a/drivers/gpu/drm/i915/i915_oa_cnl.c b/drivers/gpu/drm/i915/oa/i915_oa_cnl.c
index 5c23d883d6c9..1041e8914993 100644
--- a/drivers/gpu/drm/i915/i915_oa_cnl.c
+++ b/drivers/gpu/drm/i915/oa/i915_oa_cnl.c
@@ -1,7 +1,6 @@
+// SPDX-License-Identifier: MIT
/*
- * SPDX-License-Identifier: MIT
- *
- * Copyright © 2018 Intel Corporation
+ * Copyright © 2018-2019 Intel Corporation
*
* Autogenerated file by GPU Top : https://github.com/rib/gputop
* DO NOT EDIT manually!
@@ -77,26 +76,26 @@ show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf)
void
i915_perf_load_test_config_cnl(struct drm_i915_private *dev_priv)
{
- strlcpy(dev_priv->perf.oa.test_config.uuid,
+ strlcpy(dev_priv->perf.test_config.uuid,
"db41edd4-d8e7-4730-ad11-b9a2d6833503",
- sizeof(dev_priv->perf.oa.test_config.uuid));
- dev_priv->perf.oa.test_config.id = 1;
+ sizeof(dev_priv->perf.test_config.uuid));
+ dev_priv->perf.test_config.id = 1;
- dev_priv->perf.oa.test_config.mux_regs = mux_config_test_oa;
- dev_priv->perf.oa.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa);
+ dev_priv->perf.test_config.mux_regs = mux_config_test_oa;
+ dev_priv->perf.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa);
- dev_priv->perf.oa.test_config.b_counter_regs = b_counter_config_test_oa;
- dev_priv->perf.oa.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa);
+ dev_priv->perf.test_config.b_counter_regs = b_counter_config_test_oa;
+ dev_priv->perf.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa);
- dev_priv->perf.oa.test_config.flex_regs = flex_eu_config_test_oa;
- dev_priv->perf.oa.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa);
+ dev_priv->perf.test_config.flex_regs = flex_eu_config_test_oa;
+ dev_priv->perf.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa);
- dev_priv->perf.oa.test_config.sysfs_metric.name = "db41edd4-d8e7-4730-ad11-b9a2d6833503";
- dev_priv->perf.oa.test_config.sysfs_metric.attrs = dev_priv->perf.oa.test_config.attrs;
+ dev_priv->perf.test_config.sysfs_metric.name = "db41edd4-d8e7-4730-ad11-b9a2d6833503";
+ dev_priv->perf.test_config.sysfs_metric.attrs = dev_priv->perf.test_config.attrs;
- dev_priv->perf.oa.test_config.attrs[0] = &dev_priv->perf.oa.test_config.sysfs_metric_id.attr;
+ dev_priv->perf.test_config.attrs[0] = &dev_priv->perf.test_config.sysfs_metric_id.attr;
- dev_priv->perf.oa.test_config.sysfs_metric_id.attr.name = "id";
- dev_priv->perf.oa.test_config.sysfs_metric_id.attr.mode = 0444;
- dev_priv->perf.oa.test_config.sysfs_metric_id.show = show_test_oa_id;
+ dev_priv->perf.test_config.sysfs_metric_id.attr.name = "id";
+ dev_priv->perf.test_config.sysfs_metric_id.attr.mode = 0444;
+ dev_priv->perf.test_config.sysfs_metric_id.show = show_test_oa_id;
}
diff --git a/drivers/gpu/drm/i915/oa/i915_oa_cnl.h b/drivers/gpu/drm/i915/oa/i915_oa_cnl.h
new file mode 100644
index 000000000000..db379f5fcbb9
--- /dev/null
+++ b/drivers/gpu/drm/i915/oa/i915_oa_cnl.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2018-2019 Intel Corporation
+ *
+ * Autogenerated file by GPU Top : https://github.com/rib/gputop
+ * DO NOT EDIT manually!
+ */
+
+#ifndef __I915_OA_CNL_H__
+#define __I915_OA_CNL_H__
+
+struct drm_i915_private;
+
+void i915_perf_load_test_config_cnl(struct drm_i915_private *dev_priv);
+
+#endif
diff --git a/drivers/gpu/drm/i915/i915_oa_glk.c b/drivers/gpu/drm/i915/oa/i915_oa_glk.c
index 4bdda66df7d2..bd15ebe9aeeb 100644
--- a/drivers/gpu/drm/i915/i915_oa_glk.c
+++ b/drivers/gpu/drm/i915/oa/i915_oa_glk.c
@@ -1,7 +1,6 @@
+// SPDX-License-Identifier: MIT
/*
- * SPDX-License-Identifier: MIT
- *
- * Copyright © 2018 Intel Corporation
+ * Copyright © 2018-2019 Intel Corporation
*
* Autogenerated file by GPU Top : https://github.com/rib/gputop
* DO NOT EDIT manually!
@@ -64,26 +63,26 @@ show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf)
void
i915_perf_load_test_config_glk(struct drm_i915_private *dev_priv)
{
- strlcpy(dev_priv->perf.oa.test_config.uuid,
+ strlcpy(dev_priv->perf.test_config.uuid,
"dd3fd789-e783-4204-8cd0-b671bbccb0cf",
- sizeof(dev_priv->perf.oa.test_config.uuid));
- dev_priv->perf.oa.test_config.id = 1;
+ sizeof(dev_priv->perf.test_config.uuid));
+ dev_priv->perf.test_config.id = 1;
- dev_priv->perf.oa.test_config.mux_regs = mux_config_test_oa;
- dev_priv->perf.oa.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa);
+ dev_priv->perf.test_config.mux_regs = mux_config_test_oa;
+ dev_priv->perf.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa);
- dev_priv->perf.oa.test_config.b_counter_regs = b_counter_config_test_oa;
- dev_priv->perf.oa.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa);
+ dev_priv->perf.test_config.b_counter_regs = b_counter_config_test_oa;
+ dev_priv->perf.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa);
- dev_priv->perf.oa.test_config.flex_regs = flex_eu_config_test_oa;
- dev_priv->perf.oa.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa);
+ dev_priv->perf.test_config.flex_regs = flex_eu_config_test_oa;
+ dev_priv->perf.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa);
- dev_priv->perf.oa.test_config.sysfs_metric.name = "dd3fd789-e783-4204-8cd0-b671bbccb0cf";
- dev_priv->perf.oa.test_config.sysfs_metric.attrs = dev_priv->perf.oa.test_config.attrs;
+ dev_priv->perf.test_config.sysfs_metric.name = "dd3fd789-e783-4204-8cd0-b671bbccb0cf";
+ dev_priv->perf.test_config.sysfs_metric.attrs = dev_priv->perf.test_config.attrs;
- dev_priv->perf.oa.test_config.attrs[0] = &dev_priv->perf.oa.test_config.sysfs_metric_id.attr;
+ dev_priv->perf.test_config.attrs[0] = &dev_priv->perf.test_config.sysfs_metric_id.attr;
- dev_priv->perf.oa.test_config.sysfs_metric_id.attr.name = "id";
- dev_priv->perf.oa.test_config.sysfs_metric_id.attr.mode = 0444;
- dev_priv->perf.oa.test_config.sysfs_metric_id.show = show_test_oa_id;
+ dev_priv->perf.test_config.sysfs_metric_id.attr.name = "id";
+ dev_priv->perf.test_config.sysfs_metric_id.attr.mode = 0444;
+ dev_priv->perf.test_config.sysfs_metric_id.show = show_test_oa_id;
}
diff --git a/drivers/gpu/drm/i915/oa/i915_oa_glk.h b/drivers/gpu/drm/i915/oa/i915_oa_glk.h
new file mode 100644
index 000000000000..779f343efd11
--- /dev/null
+++ b/drivers/gpu/drm/i915/oa/i915_oa_glk.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2018-2019 Intel Corporation
+ *
+ * Autogenerated file by GPU Top : https://github.com/rib/gputop
+ * DO NOT EDIT manually!
+ */
+
+#ifndef __I915_OA_GLK_H__
+#define __I915_OA_GLK_H__
+
+struct drm_i915_private;
+
+void i915_perf_load_test_config_glk(struct drm_i915_private *dev_priv);
+
+#endif
diff --git a/drivers/gpu/drm/i915/i915_oa_hsw.c b/drivers/gpu/drm/i915/oa/i915_oa_hsw.c
index cc6526fdd2bd..133721a8619f 100644
--- a/drivers/gpu/drm/i915/i915_oa_hsw.c
+++ b/drivers/gpu/drm/i915/oa/i915_oa_hsw.c
@@ -1,7 +1,6 @@
+// SPDX-License-Identifier: MIT
/*
- * SPDX-License-Identifier: MIT
- *
- * Copyright © 2018 Intel Corporation
+ * Copyright © 2018-2019 Intel Corporation
*
* Autogenerated file by GPU Top : https://github.com/rib/gputop
* DO NOT EDIT manually!
@@ -94,26 +93,26 @@ show_render_basic_id(struct device *kdev, struct device_attribute *attr, char *b
void
i915_perf_load_test_config_hsw(struct drm_i915_private *dev_priv)
{
- strlcpy(dev_priv->perf.oa.test_config.uuid,
+ strlcpy(dev_priv->perf.test_config.uuid,
"403d8832-1a27-4aa6-a64e-f5389ce7b212",
- sizeof(dev_priv->perf.oa.test_config.uuid));
- dev_priv->perf.oa.test_config.id = 1;
+ sizeof(dev_priv->perf.test_config.uuid));
+ dev_priv->perf.test_config.id = 1;
- dev_priv->perf.oa.test_config.mux_regs = mux_config_render_basic;
- dev_priv->perf.oa.test_config.mux_regs_len = ARRAY_SIZE(mux_config_render_basic);
+ dev_priv->perf.test_config.mux_regs = mux_config_render_basic;
+ dev_priv->perf.test_config.mux_regs_len = ARRAY_SIZE(mux_config_render_basic);
- dev_priv->perf.oa.test_config.b_counter_regs = b_counter_config_render_basic;
- dev_priv->perf.oa.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_render_basic);
+ dev_priv->perf.test_config.b_counter_regs = b_counter_config_render_basic;
+ dev_priv->perf.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_render_basic);
- dev_priv->perf.oa.test_config.flex_regs = flex_eu_config_render_basic;
- dev_priv->perf.oa.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_render_basic);
+ dev_priv->perf.test_config.flex_regs = flex_eu_config_render_basic;
+ dev_priv->perf.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_render_basic);
- dev_priv->perf.oa.test_config.sysfs_metric.name = "403d8832-1a27-4aa6-a64e-f5389ce7b212";
- dev_priv->perf.oa.test_config.sysfs_metric.attrs = dev_priv->perf.oa.test_config.attrs;
+ dev_priv->perf.test_config.sysfs_metric.name = "403d8832-1a27-4aa6-a64e-f5389ce7b212";
+ dev_priv->perf.test_config.sysfs_metric.attrs = dev_priv->perf.test_config.attrs;
- dev_priv->perf.oa.test_config.attrs[0] = &dev_priv->perf.oa.test_config.sysfs_metric_id.attr;
+ dev_priv->perf.test_config.attrs[0] = &dev_priv->perf.test_config.sysfs_metric_id.attr;
- dev_priv->perf.oa.test_config.sysfs_metric_id.attr.name = "id";
- dev_priv->perf.oa.test_config.sysfs_metric_id.attr.mode = 0444;
- dev_priv->perf.oa.test_config.sysfs_metric_id.show = show_render_basic_id;
+ dev_priv->perf.test_config.sysfs_metric_id.attr.name = "id";
+ dev_priv->perf.test_config.sysfs_metric_id.attr.mode = 0444;
+ dev_priv->perf.test_config.sysfs_metric_id.show = show_render_basic_id;
}
diff --git a/drivers/gpu/drm/i915/oa/i915_oa_hsw.h b/drivers/gpu/drm/i915/oa/i915_oa_hsw.h
new file mode 100644
index 000000000000..ba97f732f136
--- /dev/null
+++ b/drivers/gpu/drm/i915/oa/i915_oa_hsw.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2018-2019 Intel Corporation
+ *
+ * Autogenerated file by GPU Top : https://github.com/rib/gputop
+ * DO NOT EDIT manually!
+ */
+
+#ifndef __I915_OA_HSW_H__
+#define __I915_OA_HSW_H__
+
+struct drm_i915_private;
+
+void i915_perf_load_test_config_hsw(struct drm_i915_private *dev_priv);
+
+#endif
diff --git a/drivers/gpu/drm/i915/i915_oa_icl.c b/drivers/gpu/drm/i915/oa/i915_oa_icl.c
index baa51427a543..2d92041b754f 100644
--- a/drivers/gpu/drm/i915/i915_oa_icl.c
+++ b/drivers/gpu/drm/i915/oa/i915_oa_icl.c
@@ -1,7 +1,6 @@
+// SPDX-License-Identifier: MIT
/*
- * SPDX-License-Identifier: MIT
- *
- * Copyright © 2018 Intel Corporation
+ * Copyright © 2018-2019 Intel Corporation
*
* Autogenerated file by GPU Top : https://github.com/rib/gputop
* DO NOT EDIT manually!
@@ -74,26 +73,26 @@ show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf)
void
i915_perf_load_test_config_icl(struct drm_i915_private *dev_priv)
{
- strlcpy(dev_priv->perf.oa.test_config.uuid,
+ strlcpy(dev_priv->perf.test_config.uuid,
"a291665e-244b-4b76-9b9a-01de9d3c8068",
- sizeof(dev_priv->perf.oa.test_config.uuid));
- dev_priv->perf.oa.test_config.id = 1;
+ sizeof(dev_priv->perf.test_config.uuid));
+ dev_priv->perf.test_config.id = 1;
- dev_priv->perf.oa.test_config.mux_regs = mux_config_test_oa;
- dev_priv->perf.oa.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa);
+ dev_priv->perf.test_config.mux_regs = mux_config_test_oa;
+ dev_priv->perf.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa);
- dev_priv->perf.oa.test_config.b_counter_regs = b_counter_config_test_oa;
- dev_priv->perf.oa.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa);
+ dev_priv->perf.test_config.b_counter_regs = b_counter_config_test_oa;
+ dev_priv->perf.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa);
- dev_priv->perf.oa.test_config.flex_regs = flex_eu_config_test_oa;
- dev_priv->perf.oa.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa);
+ dev_priv->perf.test_config.flex_regs = flex_eu_config_test_oa;
+ dev_priv->perf.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa);
- dev_priv->perf.oa.test_config.sysfs_metric.name = "a291665e-244b-4b76-9b9a-01de9d3c8068";
- dev_priv->perf.oa.test_config.sysfs_metric.attrs = dev_priv->perf.oa.test_config.attrs;
+ dev_priv->perf.test_config.sysfs_metric.name = "a291665e-244b-4b76-9b9a-01de9d3c8068";
+ dev_priv->perf.test_config.sysfs_metric.attrs = dev_priv->perf.test_config.attrs;
- dev_priv->perf.oa.test_config.attrs[0] = &dev_priv->perf.oa.test_config.sysfs_metric_id.attr;
+ dev_priv->perf.test_config.attrs[0] = &dev_priv->perf.test_config.sysfs_metric_id.attr;
- dev_priv->perf.oa.test_config.sysfs_metric_id.attr.name = "id";
- dev_priv->perf.oa.test_config.sysfs_metric_id.attr.mode = 0444;
- dev_priv->perf.oa.test_config.sysfs_metric_id.show = show_test_oa_id;
+ dev_priv->perf.test_config.sysfs_metric_id.attr.name = "id";
+ dev_priv->perf.test_config.sysfs_metric_id.attr.mode = 0444;
+ dev_priv->perf.test_config.sysfs_metric_id.show = show_test_oa_id;
}
diff --git a/drivers/gpu/drm/i915/oa/i915_oa_icl.h b/drivers/gpu/drm/i915/oa/i915_oa_icl.h
new file mode 100644
index 000000000000..5c64112d720e
--- /dev/null
+++ b/drivers/gpu/drm/i915/oa/i915_oa_icl.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2018-2019 Intel Corporation
+ *
+ * Autogenerated file by GPU Top : https://github.com/rib/gputop
+ * DO NOT EDIT manually!
+ */
+
+#ifndef __I915_OA_ICL_H__
+#define __I915_OA_ICL_H__
+
+struct drm_i915_private;
+
+void i915_perf_load_test_config_icl(struct drm_i915_private *dev_priv);
+
+#endif
diff --git a/drivers/gpu/drm/i915/i915_oa_kblgt2.c b/drivers/gpu/drm/i915/oa/i915_oa_kblgt2.c
index 168e49ab0d4d..1c3a67c9cfe0 100644
--- a/drivers/gpu/drm/i915/i915_oa_kblgt2.c
+++ b/drivers/gpu/drm/i915/oa/i915_oa_kblgt2.c
@@ -1,7 +1,6 @@
+// SPDX-License-Identifier: MIT
/*
- * SPDX-License-Identifier: MIT
- *
- * Copyright © 2018 Intel Corporation
+ * Copyright © 2018-2019 Intel Corporation
*
* Autogenerated file by GPU Top : https://github.com/rib/gputop
* DO NOT EDIT manually!
@@ -65,26 +64,26 @@ show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf)
void
i915_perf_load_test_config_kblgt2(struct drm_i915_private *dev_priv)
{
- strlcpy(dev_priv->perf.oa.test_config.uuid,
+ strlcpy(dev_priv->perf.test_config.uuid,
"baa3c7e4-52b6-4b85-801e-465a94b746dd",
- sizeof(dev_priv->perf.oa.test_config.uuid));
- dev_priv->perf.oa.test_config.id = 1;
+ sizeof(dev_priv->perf.test_config.uuid));
+ dev_priv->perf.test_config.id = 1;
- dev_priv->perf.oa.test_config.mux_regs = mux_config_test_oa;
- dev_priv->perf.oa.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa);
+ dev_priv->perf.test_config.mux_regs = mux_config_test_oa;
+ dev_priv->perf.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa);
- dev_priv->perf.oa.test_config.b_counter_regs = b_counter_config_test_oa;
- dev_priv->perf.oa.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa);
+ dev_priv->perf.test_config.b_counter_regs = b_counter_config_test_oa;
+ dev_priv->perf.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa);
- dev_priv->perf.oa.test_config.flex_regs = flex_eu_config_test_oa;
- dev_priv->perf.oa.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa);
+ dev_priv->perf.test_config.flex_regs = flex_eu_config_test_oa;
+ dev_priv->perf.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa);
- dev_priv->perf.oa.test_config.sysfs_metric.name = "baa3c7e4-52b6-4b85-801e-465a94b746dd";
- dev_priv->perf.oa.test_config.sysfs_metric.attrs = dev_priv->perf.oa.test_config.attrs;
+ dev_priv->perf.test_config.sysfs_metric.name = "baa3c7e4-52b6-4b85-801e-465a94b746dd";
+ dev_priv->perf.test_config.sysfs_metric.attrs = dev_priv->perf.test_config.attrs;
- dev_priv->perf.oa.test_config.attrs[0] = &dev_priv->perf.oa.test_config.sysfs_metric_id.attr;
+ dev_priv->perf.test_config.attrs[0] = &dev_priv->perf.test_config.sysfs_metric_id.attr;
- dev_priv->perf.oa.test_config.sysfs_metric_id.attr.name = "id";
- dev_priv->perf.oa.test_config.sysfs_metric_id.attr.mode = 0444;
- dev_priv->perf.oa.test_config.sysfs_metric_id.show = show_test_oa_id;
+ dev_priv->perf.test_config.sysfs_metric_id.attr.name = "id";
+ dev_priv->perf.test_config.sysfs_metric_id.attr.mode = 0444;
+ dev_priv->perf.test_config.sysfs_metric_id.show = show_test_oa_id;
}
diff --git a/drivers/gpu/drm/i915/oa/i915_oa_kblgt2.h b/drivers/gpu/drm/i915/oa/i915_oa_kblgt2.h
new file mode 100644
index 000000000000..810532fa6b63
--- /dev/null
+++ b/drivers/gpu/drm/i915/oa/i915_oa_kblgt2.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2018-2019 Intel Corporation
+ *
+ * Autogenerated file by GPU Top : https://github.com/rib/gputop
+ * DO NOT EDIT manually!
+ */
+
+#ifndef __I915_OA_KBLGT2_H__
+#define __I915_OA_KBLGT2_H__
+
+struct drm_i915_private;
+
+void i915_perf_load_test_config_kblgt2(struct drm_i915_private *dev_priv);
+
+#endif
diff --git a/drivers/gpu/drm/i915/i915_oa_kblgt3.c b/drivers/gpu/drm/i915/oa/i915_oa_kblgt3.c
index 6ffa553c388e..ebbe5a9c9fdc 100644
--- a/drivers/gpu/drm/i915/i915_oa_kblgt3.c
+++ b/drivers/gpu/drm/i915/oa/i915_oa_kblgt3.c
@@ -1,7 +1,6 @@
+// SPDX-License-Identifier: MIT
/*
- * SPDX-License-Identifier: MIT
- *
- * Copyright © 2018 Intel Corporation
+ * Copyright © 2018-2019 Intel Corporation
*
* Autogenerated file by GPU Top : https://github.com/rib/gputop
* DO NOT EDIT manually!
@@ -65,26 +64,26 @@ show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf)
void
i915_perf_load_test_config_kblgt3(struct drm_i915_private *dev_priv)
{
- strlcpy(dev_priv->perf.oa.test_config.uuid,
+ strlcpy(dev_priv->perf.test_config.uuid,
"f1792f32-6db2-4b50-b4b2-557128f1688d",
- sizeof(dev_priv->perf.oa.test_config.uuid));
- dev_priv->perf.oa.test_config.id = 1;
+ sizeof(dev_priv->perf.test_config.uuid));
+ dev_priv->perf.test_config.id = 1;
- dev_priv->perf.oa.test_config.mux_regs = mux_config_test_oa;
- dev_priv->perf.oa.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa);
+ dev_priv->perf.test_config.mux_regs = mux_config_test_oa;
+ dev_priv->perf.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa);
- dev_priv->perf.oa.test_config.b_counter_regs = b_counter_config_test_oa;
- dev_priv->perf.oa.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa);
+ dev_priv->perf.test_config.b_counter_regs = b_counter_config_test_oa;
+ dev_priv->perf.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa);
- dev_priv->perf.oa.test_config.flex_regs = flex_eu_config_test_oa;
- dev_priv->perf.oa.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa);
+ dev_priv->perf.test_config.flex_regs = flex_eu_config_test_oa;
+ dev_priv->perf.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa);
- dev_priv->perf.oa.test_config.sysfs_metric.name = "f1792f32-6db2-4b50-b4b2-557128f1688d";
- dev_priv->perf.oa.test_config.sysfs_metric.attrs = dev_priv->perf.oa.test_config.attrs;
+ dev_priv->perf.test_config.sysfs_metric.name = "f1792f32-6db2-4b50-b4b2-557128f1688d";
+ dev_priv->perf.test_config.sysfs_metric.attrs = dev_priv->perf.test_config.attrs;
- dev_priv->perf.oa.test_config.attrs[0] = &dev_priv->perf.oa.test_config.sysfs_metric_id.attr;
+ dev_priv->perf.test_config.attrs[0] = &dev_priv->perf.test_config.sysfs_metric_id.attr;
- dev_priv->perf.oa.test_config.sysfs_metric_id.attr.name = "id";
- dev_priv->perf.oa.test_config.sysfs_metric_id.attr.mode = 0444;
- dev_priv->perf.oa.test_config.sysfs_metric_id.show = show_test_oa_id;
+ dev_priv->perf.test_config.sysfs_metric_id.attr.name = "id";
+ dev_priv->perf.test_config.sysfs_metric_id.attr.mode = 0444;
+ dev_priv->perf.test_config.sysfs_metric_id.show = show_test_oa_id;
}
diff --git a/drivers/gpu/drm/i915/oa/i915_oa_kblgt3.h b/drivers/gpu/drm/i915/oa/i915_oa_kblgt3.h
new file mode 100644
index 000000000000..13d70456fabd
--- /dev/null
+++ b/drivers/gpu/drm/i915/oa/i915_oa_kblgt3.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2018-2019 Intel Corporation
+ *
+ * Autogenerated file by GPU Top : https://github.com/rib/gputop
+ * DO NOT EDIT manually!
+ */
+
+#ifndef __I915_OA_KBLGT3_H__
+#define __I915_OA_KBLGT3_H__
+
+struct drm_i915_private;
+
+void i915_perf_load_test_config_kblgt3(struct drm_i915_private *dev_priv);
+
+#endif
diff --git a/drivers/gpu/drm/i915/i915_oa_sklgt2.c b/drivers/gpu/drm/i915/oa/i915_oa_sklgt2.c
index 7ce6ee851d43..1bc359ed34e8 100644
--- a/drivers/gpu/drm/i915/i915_oa_sklgt2.c
+++ b/drivers/gpu/drm/i915/oa/i915_oa_sklgt2.c
@@ -1,7 +1,6 @@
+// SPDX-License-Identifier: MIT
/*
- * SPDX-License-Identifier: MIT
- *
- * Copyright © 2018 Intel Corporation
+ * Copyright © 2018-2019 Intel Corporation
*
* Autogenerated file by GPU Top : https://github.com/rib/gputop
* DO NOT EDIT manually!
@@ -64,26 +63,26 @@ show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf)
void
i915_perf_load_test_config_sklgt2(struct drm_i915_private *dev_priv)
{
- strlcpy(dev_priv->perf.oa.test_config.uuid,
+ strlcpy(dev_priv->perf.test_config.uuid,
"1651949f-0ac0-4cb1-a06f-dafd74a407d1",
- sizeof(dev_priv->perf.oa.test_config.uuid));
- dev_priv->perf.oa.test_config.id = 1;
+ sizeof(dev_priv->perf.test_config.uuid));
+ dev_priv->perf.test_config.id = 1;
- dev_priv->perf.oa.test_config.mux_regs = mux_config_test_oa;
- dev_priv->perf.oa.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa);
+ dev_priv->perf.test_config.mux_regs = mux_config_test_oa;
+ dev_priv->perf.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa);
- dev_priv->perf.oa.test_config.b_counter_regs = b_counter_config_test_oa;
- dev_priv->perf.oa.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa);
+ dev_priv->perf.test_config.b_counter_regs = b_counter_config_test_oa;
+ dev_priv->perf.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa);
- dev_priv->perf.oa.test_config.flex_regs = flex_eu_config_test_oa;
- dev_priv->perf.oa.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa);
+ dev_priv->perf.test_config.flex_regs = flex_eu_config_test_oa;
+ dev_priv->perf.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa);
- dev_priv->perf.oa.test_config.sysfs_metric.name = "1651949f-0ac0-4cb1-a06f-dafd74a407d1";
- dev_priv->perf.oa.test_config.sysfs_metric.attrs = dev_priv->perf.oa.test_config.attrs;
+ dev_priv->perf.test_config.sysfs_metric.name = "1651949f-0ac0-4cb1-a06f-dafd74a407d1";
+ dev_priv->perf.test_config.sysfs_metric.attrs = dev_priv->perf.test_config.attrs;
- dev_priv->perf.oa.test_config.attrs[0] = &dev_priv->perf.oa.test_config.sysfs_metric_id.attr;
+ dev_priv->perf.test_config.attrs[0] = &dev_priv->perf.test_config.sysfs_metric_id.attr;
- dev_priv->perf.oa.test_config.sysfs_metric_id.attr.name = "id";
- dev_priv->perf.oa.test_config.sysfs_metric_id.attr.mode = 0444;
- dev_priv->perf.oa.test_config.sysfs_metric_id.show = show_test_oa_id;
+ dev_priv->perf.test_config.sysfs_metric_id.attr.name = "id";
+ dev_priv->perf.test_config.sysfs_metric_id.attr.mode = 0444;
+ dev_priv->perf.test_config.sysfs_metric_id.show = show_test_oa_id;
}
diff --git a/drivers/gpu/drm/i915/oa/i915_oa_sklgt2.h b/drivers/gpu/drm/i915/oa/i915_oa_sklgt2.h
new file mode 100644
index 000000000000..fda70c51a6ec
--- /dev/null
+++ b/drivers/gpu/drm/i915/oa/i915_oa_sklgt2.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2018-2019 Intel Corporation
+ *
+ * Autogenerated file by GPU Top : https://github.com/rib/gputop
+ * DO NOT EDIT manually!
+ */
+
+#ifndef __I915_OA_SKLGT2_H__
+#define __I915_OA_SKLGT2_H__
+
+struct drm_i915_private;
+
+void i915_perf_load_test_config_sklgt2(struct drm_i915_private *dev_priv);
+
+#endif
diff --git a/drivers/gpu/drm/i915/i915_oa_sklgt3.c b/drivers/gpu/drm/i915/oa/i915_oa_sklgt3.c
index 086ca2631e1c..6e352f881310 100644
--- a/drivers/gpu/drm/i915/i915_oa_sklgt3.c
+++ b/drivers/gpu/drm/i915/oa/i915_oa_sklgt3.c
@@ -1,7 +1,6 @@
+// SPDX-License-Identifier: MIT
/*
- * SPDX-License-Identifier: MIT
- *
- * Copyright © 2018 Intel Corporation
+ * Copyright © 2018-2019 Intel Corporation
*
* Autogenerated file by GPU Top : https://github.com/rib/gputop
* DO NOT EDIT manually!
@@ -65,26 +64,26 @@ show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf)
void
i915_perf_load_test_config_sklgt3(struct drm_i915_private *dev_priv)
{
- strlcpy(dev_priv->perf.oa.test_config.uuid,
+ strlcpy(dev_priv->perf.test_config.uuid,
"2b985803-d3c9-4629-8a4f-634bfecba0e8",
- sizeof(dev_priv->perf.oa.test_config.uuid));
- dev_priv->perf.oa.test_config.id = 1;
+ sizeof(dev_priv->perf.test_config.uuid));
+ dev_priv->perf.test_config.id = 1;
- dev_priv->perf.oa.test_config.mux_regs = mux_config_test_oa;
- dev_priv->perf.oa.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa);
+ dev_priv->perf.test_config.mux_regs = mux_config_test_oa;
+ dev_priv->perf.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa);
- dev_priv->perf.oa.test_config.b_counter_regs = b_counter_config_test_oa;
- dev_priv->perf.oa.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa);
+ dev_priv->perf.test_config.b_counter_regs = b_counter_config_test_oa;
+ dev_priv->perf.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa);
- dev_priv->perf.oa.test_config.flex_regs = flex_eu_config_test_oa;
- dev_priv->perf.oa.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa);
+ dev_priv->perf.test_config.flex_regs = flex_eu_config_test_oa;
+ dev_priv->perf.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa);
- dev_priv->perf.oa.test_config.sysfs_metric.name = "2b985803-d3c9-4629-8a4f-634bfecba0e8";
- dev_priv->perf.oa.test_config.sysfs_metric.attrs = dev_priv->perf.oa.test_config.attrs;
+ dev_priv->perf.test_config.sysfs_metric.name = "2b985803-d3c9-4629-8a4f-634bfecba0e8";
+ dev_priv->perf.test_config.sysfs_metric.attrs = dev_priv->perf.test_config.attrs;
- dev_priv->perf.oa.test_config.attrs[0] = &dev_priv->perf.oa.test_config.sysfs_metric_id.attr;
+ dev_priv->perf.test_config.attrs[0] = &dev_priv->perf.test_config.sysfs_metric_id.attr;
- dev_priv->perf.oa.test_config.sysfs_metric_id.attr.name = "id";
- dev_priv->perf.oa.test_config.sysfs_metric_id.attr.mode = 0444;
- dev_priv->perf.oa.test_config.sysfs_metric_id.show = show_test_oa_id;
+ dev_priv->perf.test_config.sysfs_metric_id.attr.name = "id";
+ dev_priv->perf.test_config.sysfs_metric_id.attr.mode = 0444;
+ dev_priv->perf.test_config.sysfs_metric_id.show = show_test_oa_id;
}
diff --git a/drivers/gpu/drm/i915/oa/i915_oa_sklgt3.h b/drivers/gpu/drm/i915/oa/i915_oa_sklgt3.h
new file mode 100644
index 000000000000..df74eba5799e
--- /dev/null
+++ b/drivers/gpu/drm/i915/oa/i915_oa_sklgt3.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2018-2019 Intel Corporation
+ *
+ * Autogenerated file by GPU Top : https://github.com/rib/gputop
+ * DO NOT EDIT manually!
+ */
+
+#ifndef __I915_OA_SKLGT3_H__
+#define __I915_OA_SKLGT3_H__
+
+struct drm_i915_private;
+
+void i915_perf_load_test_config_sklgt3(struct drm_i915_private *dev_priv);
+
+#endif
diff --git a/drivers/gpu/drm/i915/i915_oa_sklgt4.c b/drivers/gpu/drm/i915/oa/i915_oa_sklgt4.c
index b291a6eb8a87..8f345115a306 100644
--- a/drivers/gpu/drm/i915/i915_oa_sklgt4.c
+++ b/drivers/gpu/drm/i915/oa/i915_oa_sklgt4.c
@@ -1,7 +1,6 @@
+// SPDX-License-Identifier: MIT
/*
- * SPDX-License-Identifier: MIT
- *
- * Copyright © 2018 Intel Corporation
+ * Copyright © 2018-2019 Intel Corporation
*
* Autogenerated file by GPU Top : https://github.com/rib/gputop
* DO NOT EDIT manually!
@@ -65,26 +64,26 @@ show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf)
void
i915_perf_load_test_config_sklgt4(struct drm_i915_private *dev_priv)
{
- strlcpy(dev_priv->perf.oa.test_config.uuid,
+ strlcpy(dev_priv->perf.test_config.uuid,
"882fa433-1f4a-4a67-a962-c741888fe5f5",
- sizeof(dev_priv->perf.oa.test_config.uuid));
- dev_priv->perf.oa.test_config.id = 1;
+ sizeof(dev_priv->perf.test_config.uuid));
+ dev_priv->perf.test_config.id = 1;
- dev_priv->perf.oa.test_config.mux_regs = mux_config_test_oa;
- dev_priv->perf.oa.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa);
+ dev_priv->perf.test_config.mux_regs = mux_config_test_oa;
+ dev_priv->perf.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa);
- dev_priv->perf.oa.test_config.b_counter_regs = b_counter_config_test_oa;
- dev_priv->perf.oa.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa);
+ dev_priv->perf.test_config.b_counter_regs = b_counter_config_test_oa;
+ dev_priv->perf.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa);
- dev_priv->perf.oa.test_config.flex_regs = flex_eu_config_test_oa;
- dev_priv->perf.oa.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa);
+ dev_priv->perf.test_config.flex_regs = flex_eu_config_test_oa;
+ dev_priv->perf.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa);
- dev_priv->perf.oa.test_config.sysfs_metric.name = "882fa433-1f4a-4a67-a962-c741888fe5f5";
- dev_priv->perf.oa.test_config.sysfs_metric.attrs = dev_priv->perf.oa.test_config.attrs;
+ dev_priv->perf.test_config.sysfs_metric.name = "882fa433-1f4a-4a67-a962-c741888fe5f5";
+ dev_priv->perf.test_config.sysfs_metric.attrs = dev_priv->perf.test_config.attrs;
- dev_priv->perf.oa.test_config.attrs[0] = &dev_priv->perf.oa.test_config.sysfs_metric_id.attr;
+ dev_priv->perf.test_config.attrs[0] = &dev_priv->perf.test_config.sysfs_metric_id.attr;
- dev_priv->perf.oa.test_config.sysfs_metric_id.attr.name = "id";
- dev_priv->perf.oa.test_config.sysfs_metric_id.attr.mode = 0444;
- dev_priv->perf.oa.test_config.sysfs_metric_id.show = show_test_oa_id;
+ dev_priv->perf.test_config.sysfs_metric_id.attr.name = "id";
+ dev_priv->perf.test_config.sysfs_metric_id.attr.mode = 0444;
+ dev_priv->perf.test_config.sysfs_metric_id.show = show_test_oa_id;
}
diff --git a/drivers/gpu/drm/i915/oa/i915_oa_sklgt4.h b/drivers/gpu/drm/i915/oa/i915_oa_sklgt4.h
new file mode 100644
index 000000000000..378ab7ab78d5
--- /dev/null
+++ b/drivers/gpu/drm/i915/oa/i915_oa_sklgt4.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2018-2019 Intel Corporation
+ *
+ * Autogenerated file by GPU Top : https://github.com/rib/gputop
+ * DO NOT EDIT manually!
+ */
+
+#ifndef __I915_OA_SKLGT4_H__
+#define __I915_OA_SKLGT4_H__
+
+struct drm_i915_private;
+
+void i915_perf_load_test_config_sklgt4(struct drm_i915_private *dev_priv);
+
+#endif
diff --git a/drivers/gpu/drm/i915/selftests/i915_active.c b/drivers/gpu/drm/i915/selftests/i915_active.c
index c0b3537a5fa6..77d844ac8b71 100644
--- a/drivers/gpu/drm/i915/selftests/i915_active.c
+++ b/drivers/gpu/drm/i915/selftests/i915_active.c
@@ -4,7 +4,10 @@
* Copyright © 2018 Intel Corporation
*/
+#include <linux/kref.h>
+
#include "gem/i915_gem_pm.h"
+#include "gt/intel_gt.h"
#include "i915_selftest.h"
@@ -13,37 +16,86 @@
struct live_active {
struct i915_active base;
+ struct kref ref;
bool retired;
};
-static void __live_active_retire(struct i915_active *base)
+static void __live_get(struct live_active *active)
+{
+ kref_get(&active->ref);
+}
+
+static void __live_free(struct live_active *active)
+{
+ i915_active_fini(&active->base);
+ kfree(active);
+}
+
+static void __live_release(struct kref *ref)
+{
+ struct live_active *active = container_of(ref, typeof(*active), ref);
+
+ __live_free(active);
+}
+
+static void __live_put(struct live_active *active)
+{
+ kref_put(&active->ref, __live_release);
+}
+
+static int __live_active(struct i915_active *base)
+{
+ struct live_active *active = container_of(base, typeof(*active), base);
+
+ __live_get(active);
+ return 0;
+}
+
+static void __live_retire(struct i915_active *base)
{
struct live_active *active = container_of(base, typeof(*active), base);
active->retired = true;
+ __live_put(active);
+}
+
+static struct live_active *__live_alloc(struct drm_i915_private *i915)
+{
+ struct live_active *active;
+
+ active = kzalloc(sizeof(*active), GFP_KERNEL);
+ if (!active)
+ return NULL;
+
+ kref_init(&active->ref);
+ i915_active_init(i915, &active->base, __live_active, __live_retire);
+
+ return active;
}
-static int __live_active_setup(struct drm_i915_private *i915,
- struct live_active *active)
+static struct live_active *
+__live_active_setup(struct drm_i915_private *i915)
{
struct intel_engine_cs *engine;
struct i915_sw_fence *submit;
+ struct live_active *active;
enum intel_engine_id id;
unsigned int count = 0;
int err = 0;
- submit = heap_fence_create(GFP_KERNEL);
- if (!submit)
- return -ENOMEM;
+ active = __live_alloc(i915);
+ if (!active)
+ return ERR_PTR(-ENOMEM);
- i915_active_init(i915, &active->base, __live_active_retire);
- active->retired = false;
+ submit = heap_fence_create(GFP_KERNEL);
+ if (!submit) {
+ kfree(active);
+ return ERR_PTR(-ENOMEM);
+ }
- if (!i915_active_acquire(&active->base)) {
- pr_err("First i915_active_acquire should report being idle\n");
- err = -EINVAL;
+ err = i915_active_acquire(&active->base);
+ if (err)
goto out;
- }
for_each_engine(engine, i915, id) {
struct i915_request *rq;
@@ -58,8 +110,7 @@ static int __live_active_setup(struct drm_i915_private *i915,
submit,
GFP_KERNEL);
if (err >= 0)
- err = i915_active_ref(&active->base,
- rq->fence.context, rq);
+ err = i915_active_ref(&active->base, rq->timeline, rq);
i915_request_add(rq);
if (err) {
pr_err("Failed to track active ref!\n");
@@ -74,74 +125,92 @@ static int __live_active_setup(struct drm_i915_private *i915,
pr_err("i915_active retired before submission!\n");
err = -EINVAL;
}
- if (active->base.count != count) {
+ if (atomic_read(&active->base.count) != count) {
pr_err("i915_active not tracking all requests, found %d, expected %d\n",
- active->base.count, count);
+ atomic_read(&active->base.count), count);
err = -EINVAL;
}
out:
i915_sw_fence_commit(submit);
heap_fence_put(submit);
+ if (err) {
+ __live_put(active);
+ active = ERR_PTR(err);
+ }
- return err;
+ return active;
}
static int live_active_wait(void *arg)
{
struct drm_i915_private *i915 = arg;
- struct live_active active;
+ struct live_active *active;
intel_wakeref_t wakeref;
- int err;
+ int err = 0;
/* Check that we get a callback when requests retire upon waiting */
mutex_lock(&i915->drm.struct_mutex);
wakeref = intel_runtime_pm_get(&i915->runtime_pm);
- err = __live_active_setup(i915, &active);
+ active = __live_active_setup(i915);
+ if (IS_ERR(active)) {
+ err = PTR_ERR(active);
+ goto err;
+ }
- i915_active_wait(&active.base);
- if (!active.retired) {
+ i915_active_wait(&active->base);
+ if (!active->retired) {
pr_err("i915_active not retired after waiting!\n");
err = -EINVAL;
}
- i915_active_fini(&active.base);
+ __live_put(active);
+
if (igt_flush_test(i915, I915_WAIT_LOCKED))
err = -EIO;
+err:
intel_runtime_pm_put(&i915->runtime_pm, wakeref);
mutex_unlock(&i915->drm.struct_mutex);
+
return err;
}
static int live_active_retire(void *arg)
{
struct drm_i915_private *i915 = arg;
- struct live_active active;
+ struct live_active *active;
intel_wakeref_t wakeref;
- int err;
+ int err = 0;
/* Check that we get a callback when requests are indirectly retired */
mutex_lock(&i915->drm.struct_mutex);
wakeref = intel_runtime_pm_get(&i915->runtime_pm);
- err = __live_active_setup(i915, &active);
+ active = __live_active_setup(i915);
+ if (IS_ERR(active)) {
+ err = PTR_ERR(active);
+ goto err;
+ }
/* waits for & retires all requests */
if (igt_flush_test(i915, I915_WAIT_LOCKED))
err = -EIO;
- if (!active.retired) {
+ if (!active->retired) {
pr_err("i915_active not retired after flushing!\n");
err = -EINVAL;
}
- i915_active_fini(&active.base);
+ __live_put(active);
+
+err:
intel_runtime_pm_put(&i915->runtime_pm, wakeref);
mutex_unlock(&i915->drm.struct_mutex);
+
return err;
}
@@ -152,7 +221,7 @@ int i915_active_live_selftests(struct drm_i915_private *i915)
SUBTEST(live_active_retire),
};
- if (i915_terminally_wedged(i915))
+ if (intel_gt_is_wedged(&i915->gt))
return 0;
return i915_subtests(tests, i915);
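The rework above converts live_active from a stack object into a heap object whose lifetime is tied to a kref, so the retire callback can drop its reference safely after the subtest function has returned. The underlying container_of release idiom, reduced to a standalone sketch (obj and payload are placeholder names, not driver API):

#include <linux/kref.h>
#include <linux/slab.h>

struct obj {
	struct kref ref;
	int payload;
};

static void obj_release(struct kref *ref)
{
	/* Called by kref_put() once the refcount drops to zero. */
	struct obj *o = container_of(ref, typeof(*o), ref);

	kfree(o);
}

Usage follows the selftest: kref_init(&o->ref) starts the count at one, and each additional user pairs kref_get(&o->ref) with kref_put(&o->ref, obj_release).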
diff --git a/drivers/gpu/drm/i915/selftests/i915_buddy.c b/drivers/gpu/drm/i915/selftests/i915_buddy.c
new file mode 100644
index 000000000000..23f784eae1e7
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/i915_buddy.c
@@ -0,0 +1,720 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include <linux/prime_numbers.h>
+
+#include "../i915_selftest.h"
+#include "i915_random.h"
+
+#define SZ_8G (1ULL << 33)
+
+static void __igt_dump_block(struct i915_buddy_mm *mm,
+ struct i915_buddy_block *block,
+ bool buddy)
+{
+ pr_err("block info: header=%llx, state=%u, order=%d, offset=%llx size=%llx root=%s buddy=%s\n",
+ block->header,
+ i915_buddy_block_state(block),
+ i915_buddy_block_order(block),
+ i915_buddy_block_offset(block),
+ i915_buddy_block_size(mm, block),
+ yesno(!block->parent),
+ yesno(buddy));
+}
+
+static void igt_dump_block(struct i915_buddy_mm *mm,
+ struct i915_buddy_block *block)
+{
+ struct i915_buddy_block *buddy;
+
+ __igt_dump_block(mm, block, false);
+
+ buddy = get_buddy(block);
+ if (buddy)
+ __igt_dump_block(mm, buddy, true);
+}
+
+static int igt_check_block(struct i915_buddy_mm *mm,
+ struct i915_buddy_block *block)
+{
+ struct i915_buddy_block *buddy;
+ unsigned int block_state;
+ u64 block_size;
+ u64 offset;
+ int err = 0;
+
+ block_state = i915_buddy_block_state(block);
+
+ if (block_state != I915_BUDDY_ALLOCATED &&
+ block_state != I915_BUDDY_FREE &&
+ block_state != I915_BUDDY_SPLIT) {
+ pr_err("block state mismatch\n");
+ err = -EINVAL;
+ }
+
+ block_size = i915_buddy_block_size(mm, block);
+ offset = i915_buddy_block_offset(block);
+
+ if (block_size < mm->chunk_size) {
+ pr_err("block size smaller than min size\n");
+ err = -EINVAL;
+ }
+
+ if (!is_power_of_2(block_size)) {
+ pr_err("block size not power of two\n");
+ err = -EINVAL;
+ }
+
+ if (!IS_ALIGNED(block_size, mm->chunk_size)) {
+ pr_err("block size not aligned to min size\n");
+ err = -EINVAL;
+ }
+
+ if (!IS_ALIGNED(offset, mm->chunk_size)) {
+ pr_err("block offset not aligned to min size\n");
+ err = -EINVAL;
+ }
+
+ if (!IS_ALIGNED(offset, block_size)) {
+ pr_err("block offset not aligned to block size\n");
+ err = -EINVAL;
+ }
+
+ buddy = get_buddy(block);
+
+ if (!buddy && block->parent) {
+ pr_err("buddy has gone fishing\n");
+ err = -EINVAL;
+ }
+
+ if (buddy) {
+ if (i915_buddy_block_offset(buddy) != (offset ^ block_size)) {
+ pr_err("buddy has wrong offset\n");
+ err = -EINVAL;
+ }
+
+ if (i915_buddy_block_size(mm, buddy) != block_size) {
+ pr_err("buddy size mismatch\n");
+ err = -EINVAL;
+ }
+
+ if (i915_buddy_block_state(buddy) == block_state &&
+ block_state == I915_BUDDY_FREE) {
+ pr_err("block and its buddy are free\n");
+ err = -EINVAL;
+ }
+ }
+
+ return err;
+}
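+
+/*
+ * A note on the buddy relation checked above: a block of size S at offset
+ * O has its buddy at O ^ S, because the two halves of a parent differ
+ * only in the size bit. With S = 0x1000, for example, the block at offset
+ * 0x2000 pairs with the one at 0x3000, and vice versa.
+ */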
+
+static int igt_check_blocks(struct i915_buddy_mm *mm,
+ struct list_head *blocks,
+ u64 expected_size,
+ bool is_contiguous)
+{
+ struct i915_buddy_block *block;
+ struct i915_buddy_block *prev;
+ u64 total;
+ int err = 0;
+
+ block = NULL;
+ prev = NULL;
+ total = 0;
+
+ list_for_each_entry(block, blocks, link) {
+ err = igt_check_block(mm, block);
+
+ if (!i915_buddy_block_is_allocated(block)) {
+ pr_err("block not allocated\n"),
+ err = -EINVAL;
+ }
+
+ if (is_contiguous && prev) {
+ u64 prev_block_size;
+ u64 prev_offset;
+ u64 offset;
+
+ prev_offset = i915_buddy_block_offset(prev);
+ prev_block_size = i915_buddy_block_size(mm, prev);
+ offset = i915_buddy_block_offset(block);
+
+ if (offset != (prev_offset + prev_block_size)) {
+ pr_err("block offset mismatch\n");
+ err = -EINVAL;
+ }
+ }
+
+ if (err)
+ break;
+
+ total += i915_buddy_block_size(mm, block);
+ prev = block;
+ }
+
+ if (!err) {
+ if (total != expected_size) {
+ pr_err("size mismatch, expected=%llx, found=%llx\n",
+ expected_size, total);
+ err = -EINVAL;
+ }
+ return err;
+ }
+
+ if (prev) {
+ pr_err("prev block, dump:\n");
+ igt_dump_block(mm, prev);
+ }
+
+ if (block) {
+ pr_err("bad block, dump:\n");
+ igt_dump_block(mm, block);
+ }
+
+ return err;
+}
+
+static int igt_check_mm(struct i915_buddy_mm *mm)
+{
+ struct i915_buddy_block *root;
+ struct i915_buddy_block *prev;
+ unsigned int i;
+ u64 total;
+ int err = 0;
+
+ if (!mm->n_roots) {
+ pr_err("n_roots is zero\n");
+ return -EINVAL;
+ }
+
+ if (mm->n_roots != hweight64(mm->size)) {
+ pr_err("n_roots mismatch, n_roots=%u, expected=%lu\n",
+ mm->n_roots, hweight64(mm->size));
+ return -EINVAL;
+ }
+
+ root = NULL;
+ prev = NULL;
+ total = 0;
+
+ for (i = 0; i < mm->n_roots; ++i) {
+ struct i915_buddy_block *block;
+ unsigned int order;
+
+ root = mm->roots[i];
+ if (!root) {
+ pr_err("root(%u) is NULL\n", i);
+ err = -EINVAL;
+ break;
+ }
+
+ err = igt_check_block(mm, root);
+
+ if (!i915_buddy_block_is_free(root)) {
+ pr_err("root not free\n");
+ err = -EINVAL;
+ }
+
+ order = i915_buddy_block_order(root);
+
+ if (!i) {
+ if (order != mm->max_order) {
+ pr_err("max order root missing\n");
+ err = -EINVAL;
+ }
+ }
+
+ if (prev) {
+ u64 prev_block_size;
+ u64 prev_offset;
+ u64 offset;
+
+ prev_offset = i915_buddy_block_offset(prev);
+ prev_block_size = i915_buddy_block_size(mm, prev);
+ offset = i915_buddy_block_offset(root);
+
+ if (offset != (prev_offset + prev_block_size)) {
+ pr_err("root offset mismatch\n");
+ err = -EINVAL;
+ }
+ }
+
+ block = list_first_entry_or_null(&mm->free_list[order],
+ struct i915_buddy_block,
+ link);
+ if (block != root) {
+ pr_err("root mismatch at order=%u\n", order);
+ err = -EINVAL;
+ }
+
+ if (err)
+ break;
+
+ prev = root;
+ total += i915_buddy_block_size(mm, root);
+ }
+
+ if (!err) {
+ if (total != mm->size) {
+ pr_err("expected mm size=%llx, found=%llx\n", mm->size,
+ total);
+ err = -EINVAL;
+ }
+ return err;
+ }
+
+ if (prev) {
+ pr_err("prev root(%u), dump:\n", i - 1);
+ igt_dump_block(mm, prev);
+ }
+
+ if (root) {
+ pr_err("bad root(%u), dump:\n", i);
+ igt_dump_block(mm, root);
+ }
+
+ return err;
+}
+
+static void igt_mm_config(u64 *size, u64 *chunk_size)
+{
+ I915_RND_STATE(prng);
+ u64 s, ms;
+
+ /* Nothing fancy, just try to get an interesting bit pattern */
+
+ prandom_seed_state(&prng, i915_selftest.random_seed);
+
+ s = i915_prandom_u64_state(&prng) & (SZ_8G - 1);
+ ms = BIT_ULL(12 + (prandom_u32_state(&prng) % ilog2(s >> 12)));
+ s = max(s & -ms, ms);
+
+ *chunk_size = ms;
+ *size = s;
+}
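+
+/*
+ * Worked example: if the PRNG yields s = 0x12345000, then ilog2(s >> 12)
+ * is 16, so ms becomes a random power of two between 2^12 and 2^27.
+ * Picking ms = 2^16 gives s & -ms = 0x12340000, i.e. the size rounded
+ * down to a whole number of chunks.
+ */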
+
+static int igt_buddy_alloc_smoke(void *arg)
+{
+ struct i915_buddy_mm mm;
+ int max_order;
+ u64 chunk_size;
+ u64 mm_size;
+ int err;
+
+ igt_mm_config(&mm_size, &chunk_size);
+
+ pr_info("buddy_init with size=%llx, chunk_size=%llx\n", mm_size, chunk_size);
+
+ err = i915_buddy_init(&mm, mm_size, chunk_size);
+ if (err) {
+ pr_err("buddy_init failed(%d)\n", err);
+ return err;
+ }
+
+ for (max_order = mm.max_order; max_order >= 0; max_order--) {
+ struct i915_buddy_block *block;
+ int order;
+ LIST_HEAD(blocks);
+ u64 total;
+
+ err = igt_check_mm(&mm);
+ if (err) {
+ pr_err("pre-mm check failed, abort\n");
+ break;
+ }
+
+ pr_info("filling from max_order=%u\n", max_order);
+
+ order = max_order;
+ total = 0;
+
+ do {
+retry:
+ block = i915_buddy_alloc(&mm, order);
+ if (IS_ERR(block)) {
+ err = PTR_ERR(block);
+ if (err == -ENOMEM) {
+ pr_info("buddy_alloc hit -ENOMEM with order=%d\n",
+ order);
+ } else {
+ if (order--) {
+ err = 0;
+ goto retry;
+ }
+
+ pr_err("buddy_alloc with order=%d failed(%d)\n",
+ order, err);
+ }
+
+ break;
+ }
+
+ list_add_tail(&block->link, &blocks);
+
+ if (i915_buddy_block_order(block) != order) {
+ pr_err("buddy_alloc order mismatch\n");
+ err = -EINVAL;
+ break;
+ }
+
+ total += i915_buddy_block_size(&mm, block);
+ } while (total < mm.size);
+
+ if (!err)
+ err = igt_check_blocks(&mm, &blocks, total, false);
+
+ i915_buddy_free_list(&mm, &blocks);
+
+ if (!err) {
+ err = igt_check_mm(&mm);
+ if (err)
+ pr_err("post-mm check failed\n");
+ }
+
+ if (err)
+ break;
+ }
+
+ if (err == -ENOMEM)
+ err = 0;
+
+ i915_buddy_fini(&mm);
+
+ return err;
+}
+
+static int igt_buddy_alloc_pessimistic(void *arg)
+{
+ const unsigned int max_order = 16;
+ struct i915_buddy_block *block, *bn;
+ struct i915_buddy_mm mm;
+ unsigned int order;
+ LIST_HEAD(blocks);
+ int err;
+
+ /*
+ * Create a pot-sized mm, then allocate one of each possible
+ * order within. This should leave the mm with exactly one
+ * page left.
+ */
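+ /*
+ * With max_order = 16 the mm spans 2^16 pages; one block of each
+ * order 0..15 consumes 2^16 - 1 of them, leaving exactly one page
+ * for the final order-0 allocation below.
+ */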
+
+ err = i915_buddy_init(&mm, PAGE_SIZE << max_order, PAGE_SIZE);
+ if (err) {
+ pr_err("buddy_init failed(%d)\n", err);
+ return err;
+ }
+ GEM_BUG_ON(mm.max_order != max_order);
+
+ for (order = 0; order < max_order; order++) {
+ block = i915_buddy_alloc(&mm, order);
+ if (IS_ERR(block)) {
+ pr_info("buddy_alloc hit -ENOMEM with order=%d\n",
+ order);
+ err = PTR_ERR(block);
+ goto err;
+ }
+
+ list_add_tail(&block->link, &blocks);
+ }
+
+ /* And now the last remaining block available */
+ block = i915_buddy_alloc(&mm, 0);
+ if (IS_ERR(block)) {
+ pr_info("buddy_alloc hit -ENOMEM on final alloc\n");
+ err = PTR_ERR(block);
+ goto err;
+ }
+ list_add_tail(&block->link, &blocks);
+
+ /* Should be completely full! */
+ for (order = max_order; order--; ) {
+ block = i915_buddy_alloc(&mm, order);
+ if (!IS_ERR(block)) {
+ pr_info("buddy_alloc unexpectedly succeeded at order %d, it should be full!",
+ order);
+ list_add_tail(&block->link, &blocks);
+ err = -EINVAL;
+ goto err;
+ }
+ }
+
+ block = list_last_entry(&blocks, typeof(*block), link);
+ list_del(&block->link);
+ i915_buddy_free(&mm, block);
+
+ /* As we free in increasing size, we make available larger blocks */
+ order = 1;
+ list_for_each_entry_safe(block, bn, &blocks, link) {
+ list_del(&block->link);
+ i915_buddy_free(&mm, block);
+
+ block = i915_buddy_alloc(&mm, order);
+ if (IS_ERR(block)) {
+ pr_info("buddy_alloc (realloc) hit -ENOMEM with order=%d\n",
+ order);
+ err = PTR_ERR(block);
+ goto err;
+ }
+ i915_buddy_free(&mm, block);
+ order++;
+ }
+
+ /* To confirm, now the whole mm should be available */
+ block = i915_buddy_alloc(&mm, max_order);
+ if (IS_ERR(block)) {
+ pr_info("buddy_alloc (realloc) hit -ENOMEM with order=%d\n",
+ max_order);
+ err = PTR_ERR(block);
+ goto err;
+ }
+ i915_buddy_free(&mm, block);
+
+err:
+ i915_buddy_free_list(&mm, &blocks);
+ i915_buddy_fini(&mm);
+ return err;
+}
+
+static int igt_buddy_alloc_optimistic(void *arg)
+{
+ const int max_order = 16;
+ struct i915_buddy_block *block;
+ struct i915_buddy_mm mm;
+ LIST_HEAD(blocks);
+ int order;
+ int err;
+
+ /*
+ * Create a mm with one block of each order available, and
+ * try to allocate them all.
+ */
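+ /*
+ * The size below is PAGE_SIZE * (2^(max_order + 1) - 1), i.e. the
+ * sum of one block of every order from 0 to max_order; for a toy
+ * max_order = 2 that would be 4 + 2 + 1 = 7 pages.
+ */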
+
+ err = i915_buddy_init(&mm,
+ PAGE_SIZE * ((1 << (max_order + 1)) - 1),
+ PAGE_SIZE);
+ if (err) {
+ pr_err("buddy_init failed(%d)\n", err);
+ return err;
+ }
+ GEM_BUG_ON(mm.max_order != max_order);
+
+ for (order = 0; order <= max_order; order++) {
+ block = i915_buddy_alloc(&mm, order);
+ if (IS_ERR(block)) {
+ pr_info("buddy_alloc hit -ENOMEM with order=%d\n",
+ order);
+ err = PTR_ERR(block);
+ goto err;
+ }
+
+ list_add_tail(&block->link, &blocks);
+ }
+
+ /* Should be completely full! */
+ block = i915_buddy_alloc(&mm, 0);
+ if (!IS_ERR(block)) {
+ pr_info("buddy_alloc unexpectedly succeeded, it should be full!");
+ list_add_tail(&block->link, &blocks);
+ err = -EINVAL;
+ goto err;
+ }
+
+err:
+ i915_buddy_free_list(&mm, &blocks);
+ i915_buddy_fini(&mm);
+ return err;
+}
+
+static int igt_buddy_alloc_pathological(void *arg)
+{
+ const int max_order = 16;
+ struct i915_buddy_block *block;
+ struct i915_buddy_mm mm;
+ LIST_HEAD(blocks);
+ LIST_HEAD(holes);
+ int order, top;
+ int err;
+
+ /*
+ * Create a pot-sized mm, then allocate one of each possible
+ * order within. This should leave the mm with exactly one
+ * page left. Free the largest block, then whittle down again.
+ * Eventually we will have a fully 50% fragmented mm.
+ */
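+ /*
+ * Each pass frees one order-top block and carves the space into one
+ * block of every order below it plus a single retained page, which
+ * is exact since 1 + (2^0 + ... + 2^(top-1)) = 2^top. The retained
+ * pages ("holes") prevent the buddies from merging back together.
+ */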
+
+ err = i915_buddy_init(&mm, PAGE_SIZE << max_order, PAGE_SIZE);
+ if (err) {
+ pr_err("buddy_init failed(%d)\n", err);
+ return err;
+ }
+ GEM_BUG_ON(mm.max_order != max_order);
+
+ for (top = max_order; top; top--) {
+ /* Make room by freeing the largest allocated block */
+ block = list_first_entry_or_null(&blocks, typeof(*block), link);
+ if (block) {
+ list_del(&block->link);
+ i915_buddy_free(&mm, block);
+ }
+
+ for (order = top; order--; ) {
+ block = i915_buddy_alloc(&mm, order);
+ if (IS_ERR(block)) {
+ pr_info("buddy_alloc hit -ENOMEM with order=%d, top=%d\n",
+ order, top);
+ err = PTR_ERR(block);
+ goto err;
+ }
+ list_add_tail(&block->link, &blocks);
+ }
+
+ /* There should be one final page for this sub-allocation */
+ block = i915_buddy_alloc(&mm, 0);
+ if (IS_ERR(block)) {
+ pr_info("buddy_alloc hit -ENOMEM for hole\n");
+ err = PTR_ERR(block);
+ goto err;
+ }
+ list_add_tail(&block->link, &holes);
+
+ block = i915_buddy_alloc(&mm, top);
+ if (!IS_ERR(block)) {
+ pr_info("buddy_alloc unexpectedly succeeded at top-order %d/%d, it should be full!",
+ top, max_order);
+ list_add_tail(&block->link, &blocks);
+ err = -EINVAL;
+ goto err;
+ }
+ }
+
+ i915_buddy_free_list(&mm, &holes);
+
+ /* Nothing larger than blocks of chunk_size now available */
+ for (order = 1; order <= max_order; order++) {
+ block = i915_buddy_alloc(&mm, order);
+ if (!IS_ERR(block)) {
+ pr_info("buddy_alloc unexpectedly succeeded at order %d, it should be full!",
+ order);
+ list_add_tail(&block->link, &blocks);
+ err = -EINVAL;
+ goto err;
+ }
+ }
+
+err:
+ list_splice_tail(&holes, &blocks);
+ i915_buddy_free_list(&mm, &blocks);
+ i915_buddy_fini(&mm);
+ return err;
+}
+
+static int igt_buddy_alloc_range(void *arg)
+{
+ struct i915_buddy_mm mm;
+ unsigned long page_num;
+ LIST_HEAD(blocks);
+ u64 chunk_size;
+ u64 offset;
+ u64 size;
+ u64 rem;
+ int err;
+
+ igt_mm_config(&size, &chunk_size);
+
+ pr_info("buddy_init with size=%llx, chunk_size=%llx\n", size, chunk_size);
+
+ err = i915_buddy_init(&mm, size, chunk_size);
+ if (err) {
+ pr_err("buddy_init failed(%d)\n", err);
+ return err;
+ }
+
+ err = igt_check_mm(&mm);
+ if (err) {
+ pr_err("pre-mm check failed, abort, abort, abort!\n");
+ goto err_fini;
+ }
+
+ rem = mm.size;
+ offset = 0;
+
+ for_each_prime_number_from(page_num, 1, ULONG_MAX - 1) {
+ struct i915_buddy_block *block;
+ LIST_HEAD(tmp);
+
+ size = min(page_num * mm.chunk_size, rem);
+
+ err = i915_buddy_alloc_range(&mm, &tmp, offset, size);
+ if (err) {
+ if (err == -ENOMEM) {
+ pr_info("alloc_range hit -ENOMEM with size=%llx\n",
+ size);
+ } else {
+ pr_err("alloc_range with offset=%llx, size=%llx failed(%d)\n",
+ offset, size, err);
+ }
+
+ break;
+ }
+
+ block = list_first_entry_or_null(&tmp,
+ struct i915_buddy_block,
+ link);
+ if (!block) {
+ pr_err("alloc_range has no blocks\n");
+ err = -EINVAL;
+ break;
+ }
+
+ if (i915_buddy_block_offset(block) != offset) {
+ pr_err("alloc_range start offset mismatch, found=%llx, expected=%llx\n",
+ i915_buddy_block_offset(block), offset);
+ err = -EINVAL;
+ }
+
+ if (!err)
+ err = igt_check_blocks(&mm, &tmp, size, true);
+
+ list_splice_tail(&tmp, &blocks);
+
+ if (err)
+ break;
+
+ offset += size;
+
+ rem -= size;
+ if (!rem)
+ break;
+ }
+
+ if (err == -ENOMEM)
+ err = 0;
+
+ i915_buddy_free_list(&mm, &blocks);
+
+ if (!err) {
+ err = igt_check_mm(&mm);
+ if (err)
+ pr_err("post-mm check failed\n");
+ }
+
+err_fini:
+ i915_buddy_fini(&mm);
+
+ return err;
+}
+
+int i915_buddy_mock_selftests(void)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(igt_buddy_alloc_pessimistic),
+ SUBTEST(igt_buddy_alloc_optimistic),
+ SUBTEST(igt_buddy_alloc_pathological),
+ SUBTEST(igt_buddy_alloc_smoke),
+ SUBTEST(igt_buddy_alloc_range),
+ };
+
+ return i915_subtests(tests, NULL);
+}
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem.c b/drivers/gpu/drm/i915/selftests/i915_gem.c
index c6a01a6e87f1..bb6dd54a6ff3 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem.c
@@ -8,6 +8,7 @@
#include "gem/selftests/igt_gem_utils.h"
#include "gem/selftests/mock_context.h"
+#include "gt/intel_gt.h"
#include "i915_selftest.h"
@@ -115,7 +116,7 @@ static void pm_resume(struct drm_i915_private *i915)
* that runtime-pm just works.
*/
with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
- intel_gt_sanitize(i915, false);
+ intel_gt_sanitize(&i915->gt, false);
i915_gem_sanitize(i915);
i915_gem_resume(i915);
}
@@ -154,8 +155,6 @@ static int igt_gem_suspend(void *arg)
mutex_lock(&i915->drm.struct_mutex);
err = switch_to_context(i915, ctx);
- if (igt_flush_test(i915, I915_WAIT_LOCKED))
- err = -EIO;
mutex_unlock(&i915->drm.struct_mutex);
out:
mock_file_free(i915, file);
@@ -195,8 +194,6 @@ static int igt_gem_hibernate(void *arg)
mutex_lock(&i915->drm.struct_mutex);
err = switch_to_context(i915, ctx);
- if (igt_flush_test(i915, I915_WAIT_LOCKED))
- err = -EIO;
mutex_unlock(&i915->drm.struct_mutex);
out:
mock_file_free(i915, file);
@@ -210,8 +207,8 @@ int i915_gem_live_selftests(struct drm_i915_private *i915)
SUBTEST(igt_gem_hibernate),
};
- if (i915_terminally_wedged(i915))
+ if (intel_gt_is_wedged(&i915->gt))
return 0;
- return i915_subtests(tests, i915);
+ return i915_live_subtests(tests, i915);
}
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
index a3cb0aade6f1..cb30c669b1b7 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
@@ -25,6 +25,7 @@
#include "gem/i915_gem_pm.h"
#include "gem/selftests/igt_gem_utils.h"
#include "gem/selftests/mock_context.h"
+#include "gt/intel_gt.h"
#include "i915_selftest.h"
@@ -47,26 +48,29 @@ static int populate_ggtt(struct drm_i915_private *i915,
{
unsigned long unbound, bound, count;
struct drm_i915_gem_object *obj;
- u64 size;
count = 0;
- for (size = 0;
- size + I915_GTT_PAGE_SIZE <= i915->ggtt.vm.total;
- size += I915_GTT_PAGE_SIZE) {
+ do {
struct i915_vma *vma;
obj = i915_gem_object_create_internal(i915, I915_GTT_PAGE_SIZE);
if (IS_ERR(obj))
return PTR_ERR(obj);
- quirk_add(obj, objects);
-
vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, 0);
- if (IS_ERR(vma))
+ if (IS_ERR(vma)) {
+ i915_gem_object_put(obj);
+ if (vma == ERR_PTR(-ENOSPC))
+ break;
+
return PTR_ERR(vma);
+ }
+ quirk_add(obj, objects);
count++;
- }
+ } while (1);
+ pr_debug("Filled GGTT with %lu pages [%llu total]\n",
+ count, i915->ggtt.vm.total / PAGE_SIZE);
bound = 0;
unbound = 0;
@@ -557,7 +561,7 @@ int i915_gem_evict_live_selftests(struct drm_i915_private *i915)
SUBTEST(igt_evict_contexts),
};
- if (i915_terminally_wedged(i915))
+ if (intel_gt_is_wedged(&i915->gt))
return 0;
return i915_subtests(tests, i915);
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
index 1a60b9fe8221..31a51ca1ddcb 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
@@ -208,9 +208,7 @@ static int igt_ppgtt_alloc(void *arg)
}
err_ppgtt_cleanup:
- mutex_lock(&dev_priv->drm.struct_mutex);
i915_vm_put(&ppgtt->vm);
- mutex_unlock(&dev_priv->drm.struct_mutex);
return err;
}
@@ -1195,7 +1193,7 @@ static int igt_ggtt_page(void *arg)
iowrite32(n, vaddr + n);
io_mapping_unmap_atomic(vaddr);
}
- i915_gem_flush_ggtt_writes(i915);
+ intel_gt_flush_ggtt_writes(ggtt->vm.gt);
i915_random_reorder(order, count, &prng);
for (n = 0; n < count; n++) {
diff --git a/drivers/gpu/drm/i915/selftests/i915_live_selftests.h b/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
index d5dc4427d664..1ccf0f731ac0 100644
--- a/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
+++ b/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
@@ -12,7 +12,9 @@
selftest(sanitycheck, i915_live_sanitycheck) /* keep first (igt selfcheck) */
selftest(uncore, intel_uncore_live_selftests)
selftest(workarounds, intel_workarounds_live_selftests)
-selftest(timelines, i915_timeline_live_selftests)
+selftest(gt_engines, intel_engine_live_selftests)
+selftest(gt_timelines, intel_timeline_live_selftests)
+selftest(gt_contexts, intel_context_live_selftests)
selftest(requests, i915_request_live_selftests)
selftest(active, i915_active_live_selftests)
selftest(objects, i915_gem_object_live_selftests)
@@ -24,7 +26,7 @@ selftest(gtt, i915_gem_gtt_live_selftests)
selftest(gem, i915_gem_live_selftests)
selftest(evict, i915_gem_evict_live_selftests)
selftest(hugepages, i915_gem_huge_page_live_selftests)
-selftest(contexts, i915_gem_context_live_selftests)
+selftest(gem_contexts, i915_gem_context_live_selftests)
selftest(blt, i915_gem_object_blt_live_selftests)
selftest(client, i915_gem_client_blt_live_selftests)
selftest(reset, intel_reset_live_selftests)
diff --git a/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h b/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h
index 510eb176bb2c..b88084fe3269 100644
--- a/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h
+++ b/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h
@@ -15,7 +15,7 @@ selftest(scatterlist, scatterlist_mock_selftests)
selftest(syncmap, i915_syncmap_mock_selftests)
selftest(uncore, intel_uncore_mock_selftests)
selftest(engine, intel_engine_cs_mock_selftests)
-selftest(timelines, i915_timeline_mock_selftests)
+selftest(timelines, intel_timeline_mock_selftests)
selftest(requests, i915_request_mock_selftests)
selftest(objects, i915_gem_object_mock_selftests)
selftest(phys, i915_gem_phys_mock_selftests)
@@ -25,3 +25,4 @@ selftest(evict, i915_gem_evict_mock_selftests)
selftest(gtt, i915_gem_gtt_mock_selftests)
selftest(hugepages, i915_gem_huge_page_mock_selftests)
selftest(contexts, i915_gem_context_mock_selftests)
+selftest(buddy, i915_buddy_mock_selftests)
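These selftest lists are consumed as X-macros: each includer defines selftest(name, func) to taste before pulling the header in, once to declare the entry points and once to build the dispatch table. A minimal sketch of the idiom with a local table type (the driver's real wrappers live in i915_selftest.[ch] and are not part of this hunk):

/* First expansion: forward-declare every entry point. */
#define selftest(name, func) int func(void);
#include "i915_mock_selftests.h"
#undef selftest

/* Second expansion: build a dispatch table from the same list. */
#define selftest(name, func) { .name = #name, .func = func },
static const struct {
	const char *name;
	int (*func)(void);
} mock_selftests[] = {
#include "i915_mock_selftests.h"
};
#undef selftest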
diff --git a/drivers/gpu/drm/i915/selftests/i915_request.c b/drivers/gpu/drm/i915/selftests/i915_request.c
index 298bb7116c51..b3688543ed7d 100644
--- a/drivers/gpu/drm/i915/selftests/i915_request.c
+++ b/drivers/gpu/drm/i915/selftests/i915_request.c
@@ -27,6 +27,8 @@
#include "gem/i915_gem_pm.h"
#include "gem/selftests/mock_context.h"
+#include "gt/intel_gt.h"
+
#include "i915_random.h"
#include "i915_selftest.h"
#include "igt_live_test.h"
@@ -44,9 +46,7 @@ static int igt_add_request(void *arg)
/* Basic preliminary test to create a request and let it loose! */
mutex_lock(&i915->drm.struct_mutex);
- request = mock_request(i915->engine[RCS0],
- i915->kernel_context,
- HZ / 10);
+ request = mock_request(i915->engine[RCS0]->kernel_context, HZ / 10);
if (!request)
goto out_unlock;
@@ -68,60 +68,63 @@ static int igt_wait_request(void *arg)
/* Submit a request, then wait upon it */
mutex_lock(&i915->drm.struct_mutex);
- request = mock_request(i915->engine[RCS0], i915->kernel_context, T);
+ request = mock_request(i915->engine[RCS0]->kernel_context, T);
if (!request) {
err = -ENOMEM;
goto out_unlock;
}
+ i915_request_get(request);
if (i915_request_wait(request, 0, 0) != -ETIME) {
pr_err("request wait (busy query) succeeded (expected timeout before submit!)\n");
- goto out_unlock;
+ goto out_request;
}
if (i915_request_wait(request, 0, T) != -ETIME) {
pr_err("request wait succeeded (expected timeout before submit!)\n");
- goto out_unlock;
+ goto out_request;
}
if (i915_request_completed(request)) {
pr_err("request completed before submit!!\n");
- goto out_unlock;
+ goto out_request;
}
i915_request_add(request);
if (i915_request_wait(request, 0, 0) != -ETIME) {
pr_err("request wait (busy query) succeeded (expected timeout after submit!)\n");
- goto out_unlock;
+ goto out_request;
}
if (i915_request_completed(request)) {
pr_err("request completed immediately!\n");
- goto out_unlock;
+ goto out_request;
}
if (i915_request_wait(request, 0, T / 2) != -ETIME) {
pr_err("request wait succeeded (expected timeout!)\n");
- goto out_unlock;
+ goto out_request;
}
if (i915_request_wait(request, 0, T) == -ETIME) {
pr_err("request wait timed out!\n");
- goto out_unlock;
+ goto out_request;
}
if (!i915_request_completed(request)) {
pr_err("request not complete after waiting!\n");
- goto out_unlock;
+ goto out_request;
}
if (i915_request_wait(request, 0, T) == -ETIME) {
pr_err("request wait timed out when already complete!\n");
- goto out_unlock;
+ goto out_request;
}
err = 0;
+out_request:
+ i915_request_put(request);
out_unlock:
mock_device_flush(i915);
mutex_unlock(&i915->drm.struct_mutex);
@@ -138,7 +141,7 @@ static int igt_fence_wait(void *arg)
/* Submit a request, treat it as a fence and wait upon it */
mutex_lock(&i915->drm.struct_mutex);
- request = mock_request(i915->engine[RCS0], i915->kernel_context, T);
+ request = mock_request(i915->engine[RCS0]->kernel_context, T);
if (!request) {
err = -ENOMEM;
goto out_locked;
@@ -191,11 +194,15 @@ static int igt_request_rewind(void *arg)
struct drm_i915_private *i915 = arg;
struct i915_request *request, *vip;
struct i915_gem_context *ctx[2];
+ struct intel_context *ce;
int err = -EINVAL;
mutex_lock(&i915->drm.struct_mutex);
ctx[0] = mock_context(i915, "A");
- request = mock_request(i915->engine[RCS0], ctx[0], 2 * HZ);
+ ce = i915_gem_context_get_engine(ctx[0], RCS0);
+ GEM_BUG_ON(IS_ERR(ce));
+ request = mock_request(ce, 2 * HZ);
+ intel_context_put(ce);
if (!request) {
err = -ENOMEM;
goto err_context_0;
@@ -205,7 +212,10 @@ static int igt_request_rewind(void *arg)
i915_request_add(request);
ctx[1] = mock_context(i915, "B");
- vip = mock_request(i915->engine[RCS0], ctx[1], 0);
+ ce = i915_gem_context_get_engine(ctx[1], RCS0);
+ GEM_BUG_ON(IS_ERR(ce));
+ vip = mock_request(ce, 0);
+ intel_context_put(ce);
if (!vip) {
err = -ENOMEM;
goto err_context_1;
@@ -254,22 +264,19 @@ struct smoketest {
struct i915_gem_context **contexts;
atomic_long_t num_waits, num_fences;
int ncontexts, max_batch;
- struct i915_request *(*request_alloc)(struct i915_gem_context *,
- struct intel_engine_cs *);
+ struct i915_request *(*request_alloc)(struct intel_context *ce);
};
static struct i915_request *
-__mock_request_alloc(struct i915_gem_context *ctx,
- struct intel_engine_cs *engine)
+__mock_request_alloc(struct intel_context *ce)
{
- return mock_request(engine, ctx, 0);
+ return mock_request(ce, 0);
}
static struct i915_request *
-__live_request_alloc(struct i915_gem_context *ctx,
- struct intel_engine_cs *engine)
+__live_request_alloc(struct intel_context *ce)
{
- return igt_request_alloc(ctx, engine);
+ return intel_context_create_request(ce);
}
static int __igt_breadcrumbs_smoketest(void *arg)
@@ -328,10 +335,14 @@ static int __igt_breadcrumbs_smoketest(void *arg)
struct i915_gem_context *ctx =
t->contexts[order[n] % t->ncontexts];
struct i915_request *rq;
+ struct intel_context *ce;
mutex_lock(BKL);
- rq = t->request_alloc(ctx, t->engine);
+ ce = i915_gem_context_get_engine(ctx, t->engine->legacy_idx);
+ GEM_BUG_ON(IS_ERR(ce));
+ rq = t->request_alloc(ce);
+ intel_context_put(ce);
if (IS_ERR(rq)) {
mutex_unlock(BKL);
err = PTR_ERR(rq);
@@ -366,14 +377,16 @@ static int __igt_breadcrumbs_smoketest(void *arg)
if (!wait_event_timeout(wait->wait,
i915_sw_fence_done(wait),
- HZ / 2)) {
+ 5 * HZ)) {
struct i915_request *rq = requests[count - 1];
- pr_err("waiting for %d fences (last %llx:%lld) on %s timed out!\n",
- count,
+ pr_err("waiting for %d/%d fences (last %llx:%lld) on %s timed out!\n",
+ atomic_read(&wait->pending), count,
rq->fence.context, rq->fence.seqno,
t->engine->name);
- i915_gem_set_wedged(t->engine->i915);
+ GEM_TRACE_DUMP();
+
+ intel_gt_set_wedged(t->engine->gt);
GEM_BUG_ON(!i915_request_completed(rq));
i915_sw_fence_wait(wait);
err = -EIO;
@@ -622,7 +635,7 @@ static struct i915_vma *empty_batch(struct drm_i915_private *i915)
__i915_gem_object_flush_map(obj, 0, 64);
i915_gem_object_unpin_map(obj);
- i915_gem_chipset_flush(i915);
+ intel_gt_chipset_flush(&i915->gt);
vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
if (IS_ERR(vma)) {
@@ -791,7 +804,7 @@ static struct i915_vma *recursive_batch(struct drm_i915_private *i915)
__i915_gem_object_flush_map(obj, 0, 64);
i915_gem_object_unpin_map(obj);
- i915_gem_chipset_flush(i915);
+ intel_gt_chipset_flush(&i915->gt);
return vma;
@@ -809,7 +822,7 @@ static int recursive_batch_resolve(struct i915_vma *batch)
return PTR_ERR(cmd);
*cmd = MI_BATCH_BUFFER_END;
- i915_gem_chipset_flush(batch->vm->i915);
+ intel_gt_chipset_flush(batch->vm->gt);
i915_gem_object_unpin_map(batch->obj);
@@ -863,7 +876,9 @@ static int live_all_engines(void *arg)
request[id]->batch = batch;
i915_vma_lock(batch);
- err = i915_vma_move_to_active(batch, request[id], 0);
+ err = i915_request_await_object(request[id], batch->obj, 0);
+ if (err == 0)
+ err = i915_vma_move_to_active(batch, request[id], 0);
i915_vma_unlock(batch);
GEM_BUG_ON(err);
@@ -979,7 +994,9 @@ static int live_sequential_engines(void *arg)
request[id]->batch = batch;
i915_vma_lock(batch);
- err = i915_vma_move_to_active(batch, request[id], 0);
+ err = i915_request_await_object(request[id], batch->obj, false);
+ if (err == 0)
+ err = i915_vma_move_to_active(batch, request[id], 0);
i915_vma_unlock(batch);
GEM_BUG_ON(err);
@@ -1031,7 +1048,7 @@ out_request:
I915_MAP_WC);
if (!IS_ERR(cmd)) {
*cmd = MI_BATCH_BUFFER_END;
- i915_gem_chipset_flush(i915);
+ intel_gt_chipset_flush(engine->gt);
i915_gem_object_unpin_map(request[id]->batch->obj);
}
@@ -1227,7 +1244,7 @@ int i915_request_live_selftests(struct drm_i915_private *i915)
SUBTEST(live_breadcrumbs_smoketest),
};
- if (i915_terminally_wedged(i915))
+ if (intel_gt_is_wedged(&i915->gt))
return 0;
return i915_subtests(tests, i915);
diff --git a/drivers/gpu/drm/i915/selftests/i915_selftest.c b/drivers/gpu/drm/i915/selftests/i915_selftest.c
index b18eaefef798..438ea0eaa416 100644
--- a/drivers/gpu/drm/i915/selftests/i915_selftest.c
+++ b/drivers/gpu/drm/i915/selftests/i915_selftest.c
@@ -26,6 +26,8 @@
#include "../i915_drv.h"
#include "../i915_selftest.h"
+#include "igt_flush_test.h"
+
struct i915_selftest i915_selftest __read_mostly = {
.timeout_ms = 1000,
};
@@ -183,7 +185,7 @@ int i915_live_selftests(struct pci_dev *pdev)
if (!i915_selftest.live)
return 0;
- err = run_selftests(live, to_i915(pci_get_drvdata(pdev)));
+ err = run_selftests(live, pdev_to_i915(pdev));
if (err) {
i915_selftest.live = err;
return err;
@@ -240,7 +242,61 @@ static bool apply_subtest_filter(const char *caller, const char *name)
return result;
}
+int __i915_nop_setup(void *data)
+{
+ return 0;
+}
+
+int __i915_nop_teardown(int err, void *data)
+{
+ return err;
+}
+
+int __i915_live_setup(void *data)
+{
+ struct drm_i915_private *i915 = data;
+
+ return intel_gt_terminally_wedged(&i915->gt);
+}
+
+int __i915_live_teardown(int err, void *data)
+{
+ struct drm_i915_private *i915 = data;
+
+ mutex_lock(&i915->drm.struct_mutex);
+ if (igt_flush_test(i915, I915_WAIT_LOCKED))
+ err = -EIO;
+ mutex_unlock(&i915->drm.struct_mutex);
+
+ i915_gem_drain_freed_objects(i915);
+
+ return err;
+}
+
+int __intel_gt_live_setup(void *data)
+{
+ struct intel_gt *gt = data;
+
+ return intel_gt_terminally_wedged(gt);
+}
+
+int __intel_gt_live_teardown(int err, void *data)
+{
+ struct intel_gt *gt = data;
+
+ mutex_lock(&gt->i915->drm.struct_mutex);
+ if (igt_flush_test(gt->i915, I915_WAIT_LOCKED))
+ err = -EIO;
+ mutex_unlock(&gt->i915->drm.struct_mutex);
+
+ i915_gem_drain_freed_objects(gt->i915);
+
+ return err;
+}
+
int __i915_subtests(const char *caller,
+ int (*setup)(void *data),
+ int (*teardown)(int err, void *data),
const struct i915_subtest *st,
unsigned int count,
void *data)
@@ -255,10 +311,17 @@ int __i915_subtests(const char *caller,
if (!apply_subtest_filter(caller, st->name))
continue;
+ err = setup(data);
+ if (err) {
+ pr_err(DRIVER_NAME "/%s: setup failed for %s\n",
+ caller, st->name);
+ return err;
+ }
+
pr_info(DRIVER_NAME ": Running %s/%s\n", caller, st->name);
GEM_TRACE("Running %s/%s\n", caller, st->name);
- err = st->func(data);
+ err = teardown(st->func(data), data);
if (err && err != -EINTR) {
pr_err(DRIVER_NAME "/%s: %s failed with error %d\n",
caller, st->name, err);
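With setup/teardown now threaded through __i915_subtests(), the i915_live_subtests() call introduced in i915_gem.c above is presumably a thin macro over it; a sketch of the shape such a wrapper could take (the actual definition sits in i915_selftest.h, outside this hunk):

#define i915_live_subtests(T, data) \
	__i915_subtests(__func__, \
			__i915_live_setup, __i915_live_teardown, \
			T, ARRAY_SIZE(T), data)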
diff --git a/drivers/gpu/drm/i915/selftests/i915_vma.c b/drivers/gpu/drm/i915/selftests/i915_vma.c
index fbc79b14823a..a5bec0a4cdcc 100644
--- a/drivers/gpu/drm/i915/selftests/i915_vma.c
+++ b/drivers/gpu/drm/i915/selftests/i915_vma.c
@@ -193,6 +193,8 @@ static int igt_vma_create(void *arg)
list_del_init(&ctx->link);
mock_context_close(ctx);
}
+
+ cond_resched();
}
end:
@@ -341,6 +343,8 @@ static int igt_vma_pin1(void *arg)
goto out;
}
}
+
+ cond_resched();
}
err = 0;
@@ -597,6 +601,8 @@ static int igt_vma_rotate_remap(void *arg)
}
i915_vma_unpin(vma);
+
+ cond_resched();
}
}
}
@@ -752,6 +758,8 @@ static int igt_vma_partial(void *arg)
i915_vma_unpin(vma);
nvma++;
+
+ cond_resched();
}
}
@@ -961,6 +969,8 @@ static int igt_vma_remapped_gtt(void *arg)
}
}
i915_vma_unpin_iomap(vma);
+
+ cond_resched();
}
}
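
Each of these i915_vma selftests sweeps a large parameter space in a tight kernel loop; the added cond_resched() calls voluntarily yield the CPU once per iteration, which keeps a long sweep from starving other tasks or tripping soft-lockup warnings on non-preemptible kernels. The resulting loop shape, with a hypothetical per-iteration body:

	for (i = 0; i < count; i++) {
		err = exercise_one(vma, i);	/* pin/check/unpin one combination */
		if (err)
			return err;

		cond_resched();	/* a no-op unless a reschedule is pending */
	}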
diff --git a/drivers/gpu/drm/i915/selftests/igt_flush_test.c b/drivers/gpu/drm/i915/selftests/igt_flush_test.c
index 5bfd1b2626a2..d3b5eb402d33 100644
--- a/drivers/gpu/drm/i915/selftests/igt_flush_test.c
+++ b/drivers/gpu/drm/i915/selftests/igt_flush_test.c
@@ -5,6 +5,7 @@
*/
#include "gem/i915_gem_context.h"
+#include "gt/intel_gt.h"
#include "i915_drv.h"
#include "i915_selftest.h"
@@ -13,7 +14,7 @@
int igt_flush_test(struct drm_i915_private *i915, unsigned int flags)
{
- int ret = i915_terminally_wedged(i915) ? -EIO : 0;
+ int ret = intel_gt_is_wedged(&i915->gt) ? -EIO : 0;
int repeat = !!(flags & I915_WAIT_LOCKED);
cond_resched();
@@ -27,7 +28,7 @@ int igt_flush_test(struct drm_i915_private *i915, unsigned int flags)
__builtin_return_address(0));
GEM_TRACE_DUMP();
- i915_gem_set_wedged(i915);
+ intel_gt_set_wedged(&i915->gt);
repeat = 0;
ret = -EIO;
}
diff --git a/drivers/gpu/drm/i915/selftests/igt_reset.c b/drivers/gpu/drm/i915/selftests/igt_reset.c
index 587df6fd4ffe..7ec8f8b049c6 100644
--- a/drivers/gpu/drm/i915/selftests/igt_reset.c
+++ b/drivers/gpu/drm/i915/selftests/igt_reset.c
@@ -7,47 +7,45 @@
#include "igt_reset.h"
#include "gt/intel_engine.h"
+#include "gt/intel_gt.h"
#include "../i915_drv.h"
-void igt_global_reset_lock(struct drm_i915_private *i915)
+void igt_global_reset_lock(struct intel_gt *gt)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
- pr_debug("%s: current gpu_error=%08lx\n",
- __func__, i915->gpu_error.flags);
+ pr_debug("%s: current gpu_error=%08lx\n", __func__, gt->reset.flags);
- while (test_and_set_bit(I915_RESET_BACKOFF, &i915->gpu_error.flags))
- wait_event(i915->gpu_error.reset_queue,
- !test_bit(I915_RESET_BACKOFF,
- &i915->gpu_error.flags));
+ while (test_and_set_bit(I915_RESET_BACKOFF, &gt->reset.flags))
+ wait_event(gt->reset.queue,
+ !test_bit(I915_RESET_BACKOFF, &gt->reset.flags));
- for_each_engine(engine, i915, id) {
+ for_each_engine(engine, gt->i915, id) {
while (test_and_set_bit(I915_RESET_ENGINE + id,
- &i915->gpu_error.flags))
- wait_on_bit(&i915->gpu_error.flags,
- I915_RESET_ENGINE + id,
+ &gt->reset.flags))
+ wait_on_bit(&gt->reset.flags, I915_RESET_ENGINE + id,
TASK_UNINTERRUPTIBLE);
}
}
-void igt_global_reset_unlock(struct drm_i915_private *i915)
+void igt_global_reset_unlock(struct intel_gt *gt)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
- for_each_engine(engine, i915, id)
- clear_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags);
+ for_each_engine(engine, gt->i915, id)
+ clear_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
- clear_bit(I915_RESET_BACKOFF, &i915->gpu_error.flags);
- wake_up_all(&i915->gpu_error.reset_queue);
+ clear_bit(I915_RESET_BACKOFF, &gt->reset.flags);
+ wake_up_all(&gt->reset.queue);
}
-bool igt_force_reset(struct drm_i915_private *i915)
+bool igt_force_reset(struct intel_gt *gt)
{
- i915_gem_set_wedged(i915);
- i915_reset(i915, 0, NULL);
+ intel_gt_set_wedged(gt);
+ intel_gt_reset(gt, 0, NULL);
- return !i915_reset_failed(i915);
+ return !intel_gt_is_wedged(gt);
}
diff --git a/drivers/gpu/drm/i915/selftests/igt_reset.h b/drivers/gpu/drm/i915/selftests/igt_reset.h
index 363bd853e50f..851873b67ab3 100644
--- a/drivers/gpu/drm/i915/selftests/igt_reset.h
+++ b/drivers/gpu/drm/i915/selftests/igt_reset.h
@@ -7,10 +7,12 @@
#ifndef __I915_SELFTESTS_IGT_RESET_H__
#define __I915_SELFTESTS_IGT_RESET_H__
-#include "../i915_drv.h"
+#include <linux/types.h>
-void igt_global_reset_lock(struct drm_i915_private *i915);
-void igt_global_reset_unlock(struct drm_i915_private *i915);
-bool igt_force_reset(struct drm_i915_private *i915);
+struct intel_gt;
+
+void igt_global_reset_lock(struct intel_gt *gt);
+void igt_global_reset_unlock(struct intel_gt *gt);
+bool igt_force_reset(struct intel_gt *gt);
#endif
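
With the reset bookkeeping relocated from i915->gpu_error to gt->reset, the igt_reset helpers now take the intel_gt directly. A typical reset selftest still brackets a forced reset with the global lock so it cannot race a concurrent reset; the wrapper name below is illustrative, not an existing helper:

	static int force_reset_under_lock(struct intel_gt *gt)
	{
		int err = 0;

		/* Take I915_RESET_BACKOFF plus every per-engine reset bit. */
		igt_global_reset_lock(gt);

		/* Wedge, reset, and report whether the GT recovered. */
		if (!igt_force_reset(gt))
			err = -EIO;

		igt_global_reset_unlock(gt);	/* drop the bits, wake waiters */
		return err;
	}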
diff --git a/drivers/gpu/drm/i915/selftests/igt_spinner.c b/drivers/gpu/drm/i915/selftests/igt_spinner.c
index 1e59b543cf27..11f04ad48e68 100644
--- a/drivers/gpu/drm/i915/selftests/igt_spinner.c
+++ b/drivers/gpu/drm/i915/selftests/igt_spinner.c
@@ -3,29 +3,30 @@
*
* Copyright © 2018 Intel Corporation
*/
+#include "gt/intel_gt.h"
#include "gem/selftests/igt_gem_utils.h"
#include "igt_spinner.h"
-int igt_spinner_init(struct igt_spinner *spin, struct drm_i915_private *i915)
+int igt_spinner_init(struct igt_spinner *spin, struct intel_gt *gt)
{
unsigned int mode;
void *vaddr;
int err;
- GEM_BUG_ON(INTEL_GEN(i915) < 8);
+ GEM_BUG_ON(INTEL_GEN(gt->i915) < 8);
memset(spin, 0, sizeof(*spin));
- spin->i915 = i915;
+ spin->gt = gt;
- spin->hws = i915_gem_object_create_internal(i915, PAGE_SIZE);
+ spin->hws = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
if (IS_ERR(spin->hws)) {
err = PTR_ERR(spin->hws);
goto err;
}
- spin->obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
+ spin->obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
if (IS_ERR(spin->obj)) {
err = PTR_ERR(spin->obj);
goto err_hws;
@@ -39,7 +40,7 @@ int igt_spinner_init(struct igt_spinner *spin, struct drm_i915_private *i915)
}
spin->seqno = memset(vaddr, 0xff, PAGE_SIZE);
- mode = i915_coherent_map_type(i915);
+ mode = i915_coherent_map_type(gt->i915);
vaddr = i915_gem_object_pin_map(spin->obj, mode);
if (IS_ERR(vaddr)) {
err = PTR_ERR(vaddr);
@@ -77,7 +78,10 @@ static int move_to_active(struct i915_vma *vma,
int err;
i915_vma_lock(vma);
- err = i915_vma_move_to_active(vma, rq, flags);
+ err = i915_request_await_object(rq, vma->obj,
+ flags & EXEC_OBJECT_WRITE);
+ if (err == 0)
+ err = i915_vma_move_to_active(vma, rq, flags);
i915_vma_unlock(vma);
return err;
@@ -85,20 +89,22 @@ static int move_to_active(struct i915_vma *vma,
struct i915_request *
igt_spinner_create_request(struct igt_spinner *spin,
- struct i915_gem_context *ctx,
- struct intel_engine_cs *engine,
+ struct intel_context *ce,
u32 arbitration_command)
{
+ struct intel_engine_cs *engine = ce->engine;
struct i915_request *rq = NULL;
struct i915_vma *hws, *vma;
u32 *batch;
int err;
- vma = i915_vma_instance(spin->obj, ctx->vm, NULL);
+ GEM_BUG_ON(spin->gt != ce->vm->gt);
+
+ vma = i915_vma_instance(spin->obj, ce->vm, NULL);
if (IS_ERR(vma))
return ERR_CAST(vma);
- hws = i915_vma_instance(spin->hws, ctx->vm, NULL);
+ hws = i915_vma_instance(spin->hws, ce->vm, NULL);
if (IS_ERR(hws))
return ERR_CAST(hws);
@@ -110,7 +116,7 @@ igt_spinner_create_request(struct igt_spinner *spin,
if (err)
goto unpin_vma;
- rq = igt_request_alloc(ctx, engine);
+ rq = intel_context_create_request(ce);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto unpin_hws;
@@ -138,7 +144,7 @@ igt_spinner_create_request(struct igt_spinner *spin,
*batch++ = upper_32_bits(vma->node.start);
*batch++ = MI_BATCH_BUFFER_END; /* not reached */
- i915_gem_chipset_flush(spin->i915);
+ intel_gt_chipset_flush(engine->gt);
if (engine->emit_init_breadcrumb &&
rq->timeline->has_initial_breadcrumb) {
@@ -172,7 +178,7 @@ hws_seqno(const struct igt_spinner *spin, const struct i915_request *rq)
void igt_spinner_end(struct igt_spinner *spin)
{
*spin->batch = MI_BATCH_BUFFER_END;
- i915_gem_chipset_flush(spin->i915);
+ intel_gt_chipset_flush(spin->gt);
}
void igt_spinner_fini(struct igt_spinner *spin)
diff --git a/drivers/gpu/drm/i915/selftests/igt_spinner.h b/drivers/gpu/drm/i915/selftests/igt_spinner.h
index 34a88ac9b47a..ec62c9ef320b 100644
--- a/drivers/gpu/drm/i915/selftests/igt_spinner.h
+++ b/drivers/gpu/drm/i915/selftests/igt_spinner.h
@@ -14,21 +14,22 @@
#include "i915_request.h"
#include "i915_selftest.h"
+struct intel_gt;
+
struct igt_spinner {
- struct drm_i915_private *i915;
+ struct intel_gt *gt;
struct drm_i915_gem_object *hws;
struct drm_i915_gem_object *obj;
u32 *batch;
void *seqno;
};
-int igt_spinner_init(struct igt_spinner *spin, struct drm_i915_private *i915);
+int igt_spinner_init(struct igt_spinner *spin, struct intel_gt *gt);
void igt_spinner_fini(struct igt_spinner *spin);
struct i915_request *
igt_spinner_create_request(struct igt_spinner *spin,
- struct i915_gem_context *ctx,
- struct intel_engine_cs *engine,
+ struct intel_context *ce,
u32 arbitration_command);
void igt_spinner_end(struct igt_spinner *spin);
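
igt_spinner is rebased from a (context, engine) pair onto a pinned intel_context: ce->vm replaces ctx->vm for the VMA lookups, ce->engine supplies the engine, and requests come from intel_context_create_request(). A sketch of the updated calling sequence, assuming a pinned context ce is already in hand:

	struct igt_spinner spin;
	struct i915_request *rq;
	int err;

	err = igt_spinner_init(&spin, ce->engine->gt);
	if (err)
		return err;

	rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto out_spin;
	}
	i915_request_add(rq);

	/* ... exercise the busy engine ... */

	igt_spinner_end(&spin);	/* rewrite the batch to MI_BATCH_BUFFER_END */
out_spin:
	igt_spinner_fini(&spin);
	return err;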
diff --git a/drivers/gpu/drm/i915/selftests/igt_wedge_me.h b/drivers/gpu/drm/i915/selftests/igt_wedge_me.h
deleted file mode 100644
index 08e5ff11bbd9..000000000000
--- a/drivers/gpu/drm/i915/selftests/igt_wedge_me.h
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
- * SPDX-License-Identifier: MIT
- *
- * Copyright © 2018 Intel Corporation
- */
-
-#ifndef IGT_WEDGE_ME_H
-#define IGT_WEDGE_ME_H
-
-#include <linux/workqueue.h>
-
-#include "../i915_gem.h"
-
-struct drm_i915_private;
-
-struct igt_wedge_me {
- struct delayed_work work;
- struct drm_i915_private *i915;
- const char *name;
-};
-
-static void __igt_wedge_me(struct work_struct *work)
-{
- struct igt_wedge_me *w = container_of(work, typeof(*w), work.work);
-
- pr_err("%s timed out, cancelling test.\n", w->name);
-
- GEM_TRACE("%s timed out.\n", w->name);
- GEM_TRACE_DUMP();
-
- i915_gem_set_wedged(w->i915);
-}
-
-static void __igt_init_wedge(struct igt_wedge_me *w,
- struct drm_i915_private *i915,
- long timeout,
- const char *name)
-{
- w->i915 = i915;
- w->name = name;
-
- INIT_DELAYED_WORK_ONSTACK(&w->work, __igt_wedge_me);
- schedule_delayed_work(&w->work, timeout);
-}
-
-static void __igt_fini_wedge(struct igt_wedge_me *w)
-{
- cancel_delayed_work_sync(&w->work);
- destroy_delayed_work_on_stack(&w->work);
- w->i915 = NULL;
-}
-
-#define igt_wedge_on_timeout(W, DEV, TIMEOUT) \
- for (__igt_init_wedge((W), (DEV), (TIMEOUT), __func__); \
- (W)->i915; \
- __igt_fini_wedge((W)))
-
-#endif /* IGT_WEDGE_ME_H */
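
For reference, the deleted igt_wedge_on_timeout() wrapped exactly one statement: the for-loop's init arms a delayed worker that wedges the GPU if the body overruns, the (W)->i915 condition admits a single pass, and __igt_fini_wedge() both cancels the worker and clears w->i915 so the loop terminates. Usage looked like:

	struct igt_wedge_me w;

	igt_wedge_on_timeout(&w, i915, HZ)	/* wedge if this takes > 1s */
		err = do_slow_thing(i915);	/* hypothetical body */

Its removal here fits the rest of the series, with callers presumably moving to the intel_gt wedge interfaces and the new per-subtest setup/teardown hooks.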
diff --git a/drivers/gpu/drm/i915/selftests/lib_sw_fence.c b/drivers/gpu/drm/i915/selftests/lib_sw_fence.c
index b976c12817c5..080b90b63d16 100644
--- a/drivers/gpu/drm/i915/selftests/lib_sw_fence.c
+++ b/drivers/gpu/drm/i915/selftests/lib_sw_fence.c
@@ -40,6 +40,7 @@ void __onstack_fence_init(struct i915_sw_fence *fence,
__init_waitqueue_head(&fence->wait, name, key);
atomic_set(&fence->pending, 1);
+ fence->error = 0;
fence->flags = (unsigned long)nop_fence_notify;
}
diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
index 64bc51400ae7..01a89c071bf5 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
@@ -25,6 +25,7 @@
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
+#include "gt/intel_gt.h"
#include "gt/mock_engine.h"
#include "mock_request.h"
@@ -67,7 +68,7 @@ static void mock_device_release(struct drm_device *dev)
i915_gem_contexts_fini(i915);
mutex_unlock(&i915->drm.struct_mutex);
- i915_timelines_fini(i915);
+ intel_timelines_fini(i915);
drain_workqueue(i915->wq);
i915_gem_drain_freed_objects(i915);
@@ -179,14 +180,9 @@ struct drm_i915_private *mock_gem_device(void)
mock_uncore_init(&i915->uncore);
i915_gem_init__mm(i915);
- intel_gt_pm_init(i915);
+ intel_gt_init_early(&i915->gt, i915);
atomic_inc(&i915->gt.wakeref.count); /* disable; no hw support */
- init_waitqueue_head(&i915->gpu_error.wait_queue);
- init_waitqueue_head(&i915->gpu_error.reset_queue);
- init_srcu_struct(&i915->gpu_error.reset_backoff_srcu);
- mutex_init(&i915->gpu_error.wedge_mutex);
-
i915->wq = alloc_ordered_workqueue("mock", 0);
if (!i915->wq)
goto err_drv;
@@ -198,11 +194,7 @@ struct drm_i915_private *mock_gem_device(void)
i915->gt.awake = true;
- i915_timelines_init(i915);
-
- INIT_LIST_HEAD(&i915->gt.active_rings);
- INIT_LIST_HEAD(&i915->gt.closed_vma);
- spin_lock_init(&i915->gt.closed_lock);
+ intel_timelines_init(i915);
mutex_lock(&i915->drm.struct_mutex);
@@ -221,6 +213,7 @@ struct drm_i915_private *mock_gem_device(void)
if (mock_engine_init(i915->engine[RCS0]))
goto err_context;
+ intel_engines_driver_register(i915);
mutex_unlock(&i915->drm.struct_mutex);
WARN_ON(i915_gemfs_init(i915));
@@ -233,7 +226,7 @@ err_engine:
mock_engine_free(i915->engine[RCS0]);
err_unlock:
mutex_unlock(&i915->drm.struct_mutex);
- i915_timelines_fini(i915);
+ intel_timelines_fini(i915);
destroy_workqueue(i915->wq);
err_drv:
drm_mode_config_cleanup(&i915->drm);
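
mock_gem_device() no longer open-codes the per-GT state; intel_gt_init_early() owns it. Judging only from the lines deleted here, that helper must cover at least the following; the real definition lives in gt/intel_gt.c and may do more, and the gt->reset field names other than .queue are inferred from the old gpu_error names:

	void intel_gt_init_early(struct intel_gt *gt, struct drm_i915_private *i915)
	{
		gt->i915 = i915;

		INIT_LIST_HEAD(&gt->active_rings);
		INIT_LIST_HEAD(&gt->closed_vma);
		spin_lock_init(&gt->closed_lock);

		/* Reset machinery, formerly under i915->gpu_error. */
		init_waitqueue_head(&gt->reset.queue);
		/* ...plus the wedge mutex and backoff SRCU now under gt->reset,
		 * and GT power management init (formerly intel_gt_pm_init(i915)).
		 */
	}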
diff --git a/drivers/gpu/drm/i915/selftests/mock_gtt.c b/drivers/gpu/drm/i915/selftests/mock_gtt.c
index f625c307a406..e62a67e0f79c 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gtt.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gtt.c
@@ -98,6 +98,7 @@ void mock_init_ggtt(struct drm_i915_private *i915, struct i915_ggtt *ggtt)
{
memset(ggtt, 0, sizeof(*ggtt));
+ ggtt->vm.gt = &i915->gt;
ggtt->vm.i915 = i915;
ggtt->vm.is_ggtt = true;
@@ -116,6 +117,8 @@ void mock_init_ggtt(struct drm_i915_private *i915, struct i915_ggtt *ggtt)
ggtt->vm.vma_ops.clear_pages = clear_pages;
i915_address_space_init(&ggtt->vm, VM_CLASS_GGTT);
+
+ intel_gt_init_hw(i915);
}
void mock_fini_ggtt(struct i915_ggtt *ggtt)
diff --git a/drivers/gpu/drm/i915/selftests/mock_request.c b/drivers/gpu/drm/i915/selftests/mock_request.c
index 9390fc09984b..09f747228dff 100644
--- a/drivers/gpu/drm/i915/selftests/mock_request.c
+++ b/drivers/gpu/drm/i915/selftests/mock_request.c
@@ -28,14 +28,12 @@
#include "mock_request.h"
struct i915_request *
-mock_request(struct intel_engine_cs *engine,
- struct i915_gem_context *context,
- unsigned long delay)
+mock_request(struct intel_context *ce, unsigned long delay)
{
struct i915_request *request;
/* NB the i915->requests slab cache is enlarged to fit mock_request */
- request = igt_request_alloc(context, engine);
+ request = intel_context_create_request(ce);
if (IS_ERR(request))
return NULL;
diff --git a/drivers/gpu/drm/i915/selftests/mock_request.h b/drivers/gpu/drm/i915/selftests/mock_request.h
index 4acf0211df20..8907b60c290d 100644
--- a/drivers/gpu/drm/i915/selftests/mock_request.h
+++ b/drivers/gpu/drm/i915/selftests/mock_request.h
@@ -30,9 +30,7 @@
#include "../i915_request.h"
struct i915_request *
-mock_request(struct intel_engine_cs *engine,
- struct i915_gem_context *context,
- unsigned long delay);
+mock_request(struct intel_context *ce, unsigned long delay);
bool mock_cancel_request(struct i915_request *request);
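
mock_request() follows the same contraction as the spinner: the (engine, context) pair becomes a single intel_context, and allocation goes through intel_context_create_request(). Note that the helper converts allocation errors to NULL, so call sites check for NULL rather than IS_ERR():

	struct i915_request *rq;

	rq = mock_request(ce, 2 * HZ);	/* hypothetical two-second mock delay */
	if (!rq)
		return -ENOMEM;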
diff --git a/drivers/gpu/drm/i915/selftests/mock_uncore.c b/drivers/gpu/drm/i915/selftests/mock_uncore.c
index ff8999c63a12..49585f16d4a2 100644
--- a/drivers/gpu/drm/i915/selftests/mock_uncore.c
+++ b/drivers/gpu/drm/i915/selftests/mock_uncore.c
@@ -41,6 +41,6 @@ __nop_read(64)
void mock_uncore_init(struct intel_uncore *uncore)
{
- ASSIGN_WRITE_MMIO_VFUNCS(uncore, nop);
- ASSIGN_READ_MMIO_VFUNCS(uncore, nop);
+ ASSIGN_RAW_WRITE_MMIO_VFUNCS(uncore, nop);
+ ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, nop);
}